-rw-r--r--Documentation/ABI/testing/debugfs-aufs55
-rw-r--r--Documentation/ABI/testing/sysfs-aufs31
-rw-r--r--Documentation/filesystems/aufs/README395
-rw-r--r--Documentation/filesystems/aufs/design/01intro.txt171
-rw-r--r--Documentation/filesystems/aufs/design/02struct.txt258
-rw-r--r--Documentation/filesystems/aufs/design/03atomic_open.txt85
-rw-r--r--Documentation/filesystems/aufs/design/03lookup.txt113
-rw-r--r--Documentation/filesystems/aufs/design/04branch.txt74
-rw-r--r--Documentation/filesystems/aufs/design/05wbr_policy.txt64
-rw-r--r--Documentation/filesystems/aufs/design/06dirren.dot31
-rw-r--r--Documentation/filesystems/aufs/design/06dirren.txt102
-rw-r--r--Documentation/filesystems/aufs/design/06fhsm.txt120
-rw-r--r--Documentation/filesystems/aufs/design/06mmap.txt72
-rw-r--r--Documentation/filesystems/aufs/design/06xattr.txt96
-rw-r--r--Documentation/filesystems/aufs/design/07export.txt58
-rw-r--r--Documentation/filesystems/aufs/design/08shwh.txt52
-rw-r--r--Documentation/filesystems/aufs/design/10dynop.txt47
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Makefile5
-rw-r--r--arch/arm/boot/Makefile4
-rw-r--r--arch/arm/kernel/entry-common.S3
-rw-r--r--arch/arm/mm/fault.c13
-rw-r--r--arch/arm/tools/gen-mach-types2
-rw-r--r--arch/mips/mm/tlbex.c3
-rw-r--r--arch/powerpc/Makefile5
-rw-r--r--arch/powerpc/kernel/Makefile4
-rw-r--r--arch/powerpc/kernel/vdso32/sigtramp.S34
-rw-r--r--arch/powerpc/mm/slb.c1
-rw-r--r--arch/sh/tools/gen-mach-types2
-rw-r--r--arch/x86/entry/common.c28
-rw-r--r--arch/x86/entry/entry_64.S4
-rw-r--r--arch/x86/entry/thunk_64.S2
-rw-r--r--arch/x86/include/asm/irqflags.h4
-rw-r--r--arch/x86/kernel/cpu/vmware.c1
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk2
-rw-r--r--drivers/block/loop.c18
-rw-r--r--drivers/staging/octeon/ethernet-tx.c3
-rw-r--r--drivers/usb/host/pci-quirks.c14
-rw-r--r--drivers/usb/host/uhci-q.c4
-rw-r--r--drivers/video/fbdev/uvesafb.c28
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/Makefile2
-rw-r--r--fs/Makefile.rej6
-rw-r--r--fs/aufs/Kconfig199
-rw-r--r--fs/aufs/Makefile46
-rw-r--r--fs/aufs/aufs.h62
-rw-r--r--fs/aufs/branch.c1422
-rw-r--r--fs/aufs/branch.h365
-rw-r--r--fs/aufs/conf.mk40
-rw-r--r--fs/aufs/cpup.c1458
-rw-r--r--fs/aufs/cpup.h100
-rw-r--r--fs/aufs/dbgaufs.c526
-rw-r--r--fs/aufs/dbgaufs.h53
-rw-r--r--fs/aufs/dcsub.c225
-rw-r--r--fs/aufs/dcsub.h137
-rw-r--r--fs/aufs/debug.c440
-rw-r--r--fs/aufs/debug.h226
-rw-r--r--fs/aufs/dentry.c1153
-rw-r--r--fs/aufs/dentry.h268
-rw-r--r--fs/aufs/dinfo.c554
-rw-r--r--fs/aufs/dir.c762
-rw-r--r--fs/aufs/dir.h134
-rw-r--r--fs/aufs/dirren.c1316
-rw-r--r--fs/aufs/dirren.h140
-rw-r--r--fs/aufs/dynop.c370
-rw-r--r--fs/aufs/dynop.h75
-rw-r--r--fs/aufs/export.c838
-rw-r--r--fs/aufs/f298
-rw-r--r--fs/aufs/f_op.c819
-rw-r--r--fs/aufs/fhsm.c427
-rw-r--r--fs/aufs/file.c863
-rw-r--r--fs/aufs/file.h342
-rw-r--r--fs/aufs/finfo.c149
-rw-r--r--fs/aufs/fstype.h401
-rw-r--r--fs/aufs/hbl.h65
-rw-r--r--fs/aufs/hfsnotify.c289
-rw-r--r--fs/aufs/hfsplus.c60
-rw-r--r--fs/aufs/hnotify.c720
-rw-r--r--fs/aufs/i_op.c1506
-rw-r--r--fs/aufs/i_op_add.c935
-rw-r--r--fs/aufs/i_op_del.c512
-rw-r--r--fs/aufs/i_op_ren.c1249
-rw-r--r--fs/aufs/iinfo.c286
-rw-r--r--fs/aufs/inode.c528
-rw-r--r--fs/aufs/inode.h698
-rw-r--r--fs/aufs/ioctl.c220
-rw-r--r--fs/aufs/lcnt.h186
-rw-r--r--fs/aufs/loop.c148
-rw-r--r--fs/aufs/loop.h53
-rw-r--r--fs/aufs/magic.mk31
-rw-r--r--fs/aufs/module.c273
-rw-r--r--fs/aufs/module.h166
-rw-r--r--fs/aufs/mvdown.c705
-rw-r--r--fs/aufs/opts.c1877
-rw-r--r--fs/aufs/opts.h225
-rw-r--r--fs/aufs/plink.c516
-rw-r--r--fs/aufs/poll.c51
-rw-r--r--fs/aufs/posix_acl.c103
-rw-r--r--fs/aufs/procfs.c171
-rw-r--r--fs/aufs/rdu.c382
-rw-r--r--fs/aufs/rwsem.h73
-rw-r--r--fs/aufs/sbinfo.c313
-rw-r--r--fs/aufs/super.c1048
-rw-r--r--fs/aufs/super.h589
-rw-r--r--fs/aufs/sysaufs.c93
-rw-r--r--fs/aufs/sysaufs.h102
-rw-r--r--fs/aufs/sysfs.c373
-rw-r--r--fs/aufs/sysrq.c160
-rw-r--r--fs/aufs/vdir.c895
-rw-r--r--fs/aufs/vfsub.c902
-rw-r--r--fs/aufs/vfsub.h355
-rw-r--r--fs/aufs/wbr_policy.c830
-rw-r--r--fs/aufs/whout.c1062
-rw-r--r--fs/aufs/whout.h86
-rw-r--r--fs/aufs/wkq.c392
-rw-r--r--fs/aufs/wkq.h89
-rw-r--r--fs/aufs/xattr.c356
-rw-r--r--fs/aufs/xino.c1956
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/exec.c1
-rw-r--r--fs/fat/Kconfig32
-rw-r--r--fs/fat/dir.c17
-rw-r--r--fs/fat/namei_vfat.c113
-rw-r--r--fs/fcntl.c5
-rw-r--r--fs/file_table.c3
-rw-r--r--fs/inode.c3
-rw-r--r--fs/namespace.c9
-rw-r--r--fs/nfs/Kconfig13
-rw-r--r--fs/nfs/client.c12
-rw-r--r--fs/nfs/internal.h7
-rw-r--r--fs/nfs/mount_clnt.c7
-rw-r--r--fs/nfs/super.c34
-rw-r--r--fs/notify/group.c4
-rw-r--r--fs/notify/mark.c4
-rw-r--r--fs/open.c1
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/nommu.c5
-rw-r--r--fs/proc/task_mmu.c7
-rw-r--r--fs/proc/task_nommu.c5
-rw-r--r--fs/read_write.c26
-rw-r--r--fs/splice.c12
-rw-r--r--fs/sync.c3
-rw-r--r--fs/xattr.c1
-rw-r--r--fs/yaffs2/Kconfig171
-rw-r--r--fs/yaffs2/Makefile18
-rw-r--r--fs/yaffs2/yaffs_allocator.c357
-rw-r--r--fs/yaffs2/yaffs_allocator.h30
-rw-r--r--fs/yaffs2/yaffs_attribs.c124
-rw-r--r--fs/yaffs2/yaffs_attribs.h71
-rw-r--r--fs/yaffs2/yaffs_bitmap.c97
-rw-r--r--fs/yaffs2/yaffs_bitmap.h33
-rw-r--r--fs/yaffs2/yaffs_checkptrw.c474
-rw-r--r--fs/yaffs2/yaffs_checkptrw.h33
-rw-r--r--fs/yaffs2/yaffs_ecc.c281
-rw-r--r--fs/yaffs2/yaffs_ecc.h44
-rw-r--r--fs/yaffs2/yaffs_getblockinfo.h35
-rw-r--r--fs/yaffs2/yaffs_guts.c5082
-rw-r--r--fs/yaffs2/yaffs_guts.h1003
-rw-r--r--fs/yaffs2/yaffs_linux.h48
-rw-r--r--fs/yaffs2/yaffs_mtdif.c300
-rw-r--r--fs/yaffs2/yaffs_mtdif.h25
-rw-r--r--fs/yaffs2/yaffs_nameval.c208
-rw-r--r--fs/yaffs2/yaffs_nameval.h28
-rw-r--r--fs/yaffs2/yaffs_nand.c122
-rw-r--r--fs/yaffs2/yaffs_nand.h39
-rw-r--r--fs/yaffs2/yaffs_packedtags1.c56
-rw-r--r--fs/yaffs2/yaffs_packedtags1.h39
-rw-r--r--fs/yaffs2/yaffs_packedtags2.c197
-rw-r--r--fs/yaffs2/yaffs_packedtags2.h47
-rw-r--r--fs/yaffs2/yaffs_summary.c313
-rw-r--r--fs/yaffs2/yaffs_summary.h37
-rw-r--r--fs/yaffs2/yaffs_tagscompat.c381
-rw-r--r--fs/yaffs2/yaffs_tagscompat.h44
-rw-r--r--fs/yaffs2/yaffs_tagsmarshall.c199
-rw-r--r--fs/yaffs2/yaffs_tagsmarshall.h22
-rw-r--r--fs/yaffs2/yaffs_trace.h57
-rw-r--r--fs/yaffs2/yaffs_verify.c529
-rw-r--r--fs/yaffs2/yaffs_verify.h43
-rw-r--r--fs/yaffs2/yaffs_vfs.c3676
-rw-r--r--fs/yaffs2/yaffs_yaffs1.c422
-rw-r--r--fs/yaffs2/yaffs_yaffs1.h22
-rw-r--r--fs/yaffs2/yaffs_yaffs2.c1532
-rw-r--r--fs/yaffs2/yaffs_yaffs2.h39
-rw-r--r--fs/yaffs2/yportenv.h85
-rw-r--r--include/linux/fs.h10
-rw-r--r--include/linux/lockdep.h3
-rw-r--r--include/linux/mm.h22
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mnt_namespace.h3
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/splice.h6
-rw-r--r--include/uapi/linux/aufs_type.h448
-rw-r--r--include/uapi/linux/msdos_fs.h1
-rw-r--r--include/uapi/linux/nfs_mount.h4
-rw-r--r--include/video/uvesafb.h4
-rw-r--r--init/Kconfig25
-rw-r--r--init/do_mounts.c8
-rw-r--r--init/main.c15
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/locking/lockdep.c4
-rw-r--r--kernel/task_work.c1
-rw-r--r--kernel/uptime_limit.c166
-rw-r--r--mm/Makefile2
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/mmap.c33
-rw-r--r--mm/nommu.c8
-rw-r--r--mm/prfile.c86
-rw-r--r--net/ipv4/arp.c9
-rw-r--r--net/ipv4/devinet.c29
-rw-r--r--[-rwxr-xr-x]scripts/kconfig/mconf-cfg.sh8
-rw-r--r--scripts/mod/modpost.c14
-rw-r--r--security/commoncap.c2
-rw-r--r--security/device_cgroup.c2
-rw-r--r--security/security.c10
-rw-r--r--sound/pci/hda/hda_intel.c6
-rw-r--r--tools/include/linux/compiler.h1
-rw-r--r--tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk2
-rw-r--r--tools/perf/Makefile.config10
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/arch/x86/tests/gen-insn-x86-dat.awk2
-rw-r--r--tools/perf/bench/numa.c3
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Context.c4
-rw-r--r--tools/perf/tests/attr.c20
-rw-r--r--tools/perf/tests/bp_signal.c2
-rw-r--r--tools/perf/tests/bp_signal_overflow.c2
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk2
-rw-r--r--tools/perf/util/libunwind/x86_32.c1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c4
-rw-r--r--[-rwxr-xr-x]tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk2
-rw-r--r--usr/Makefile2
235 files changed, 55447 insertions, 119 deletions
diff --git a/Documentation/ABI/testing/debugfs-aufs b/Documentation/ABI/testing/debugfs-aufs
new file mode 100644
index 000000000000..4a6694194ba6
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-aufs
@@ -0,0 +1,55 @@
+What: /debug/aufs/si_<id>/
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ Under /debug/aufs, a directory named si_<id> is created
+ per aufs mount, where <id> is a unique id generated
+ internally.
+
+What: /debug/aufs/si_<id>/plink
+Date: Apr 2013
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It has three lines and shows information about the
+	pseudo-links. The first line is a single number
+	representing the number of buckets. The second line is the
+	number of pseudo-links per bucket (separated by
+	blanks). The last line is a single number representing the
+	total number of pseudo-links.
+	When the aufs mount option 'noplink' is specified, it
+	will show "1\n0\n0\n".
+
+What: /debug/aufs/si_<id>/xib
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the number of blocks consumed by the xib (External
+	Inode Number Bitmap), its block size and its file size.
+ When the aufs mount option 'noxino' is specified, it
+ will be empty. About XINO files, see the aufs manual.
+
+What: /debug/aufs/si_<id>/xi0, xi1 ... xiN and xiN-N
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the number of blocks consumed by each xino (External
+	Inode Number Translation Table), its link count, block size
+	and file size.
+	Due to the file size limit, there may be multiple
+	xino files per branch. In this case, "-N" is added to
+	the filename and corresponds to the index of the
+	internal xino array. "-0" is omitted.
+	When the aufs mount option 'noxino' is specified, these
+	entries won't exist. About XINO files, see the aufs
+	manual.
+
+What: /debug/aufs/si_<id>/xigen
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the number of blocks consumed by the xigen (External
+	Inode Generation Table), its block size and its file size.
+ If CONFIG_AUFS_EXPORT is disabled, this entry will not
+ be created.
+ When the aufs mount option 'noxino' is specified, it
+ will be empty. About XINO files, see the aufs manual.
diff --git a/Documentation/ABI/testing/sysfs-aufs b/Documentation/ABI/testing/sysfs-aufs
new file mode 100644
index 000000000000..82f9518495ea
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-aufs
@@ -0,0 +1,31 @@
+What: /sys/fs/aufs/si_<id>/
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ Under /sys/fs/aufs, a directory named si_<id> is created
+ per aufs mount, where <id> is a unique id generated
+ internally.
+
+What: /sys/fs/aufs/si_<id>/br0, br1 ... brN
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the absolute path of a member directory (which
+	is called a branch) in aufs, and its permission.
+
+What: /sys/fs/aufs/si_<id>/brid0, brid1 ... bridN
+Date: July 2013
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the id of a member directory (which is called
+	a branch) in aufs.
+
+What: /sys/fs/aufs/si_<id>/xi_path
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the absolute path of the XINO (External Inode Number
+ Bitmap, Translation Table and Generation Table) file
+ even if it is the default path.
+ When the aufs mount option 'noxino' is specified, it
+ will be empty. About XINO files, see the aufs manual.
diff --git a/Documentation/filesystems/aufs/README b/Documentation/filesystems/aufs/README
new file mode 100644
index 000000000000..fb05e3b722ea
--- /dev/null
+++ b/Documentation/filesystems/aufs/README
@@ -0,0 +1,395 @@
+
+Aufs4 -- advanced multi layered unification filesystem version 4.x
+http://aufs.sf.net
+Junjiro R. Okajima
+
+
+0. Introduction
+----------------------------------------
+In the early days, aufs was an entirely re-designed and re-implemented
+Unionfs Version 1.x series. By adding many original ideas, approaches,
+improvements and implementations, it became totally different from
+Unionfs while keeping the basic features.
+Later, the Unionfs Version 2.x series began taking some of the same
+approaches as aufs1.
+Unionfs is being developed by Professor Erez Zadok at Stony Brook
+University and his team.
+
+Aufs4 supports linux-4.0 and later; for the linux-3.x series try aufs3.
+If you need support for older kernel versions, try the aufs2-2.6.git or
+aufs2-standalone.git repository, or aufs1 from CVS on SourceForge.
+
+Note: it became clear that "Aufs was rejected. Let's give it up."
+ According to Christoph Hellwig, linux rejects all union-type
+ filesystems but UnionMount.
+<http://marc.info/?l=linux-kernel&m=123938533724484&w=2>
+
+PS. Al Viro seems to have a plan to merge aufs as well as overlayfs and
+ UnionMount, and he pointed out an issue around a directory mutex
+ lock, which aufs addressed. But it is still uncertain whether aufs
+ (or any other union solution) will be merged.
+<http://marc.info/?l=linux-kernel&m=136312705029295&w=1>
+
+
+1. Features
+----------------------------------------
+- unite several directories into a single virtual filesystem. The member
+  directory is called a branch.
+- you can specify the permission flags for each branch, which are
+  'readonly', 'readwrite' and 'whiteout-able.'
+- with an upper writable branch, internal copy-up and whiteout, files/dirs
+  on a readonly branch are logically modifiable.
+- dynamic branch manipulation, add, del.
+- etc...
+
+Also there are many enhancements in aufs, such as:
+- test only the highest one for the directory permission (dirperm1)
+- copyup on open (coo=)
+- 'move' policy for copy-up between two writable branches, after
+ checking free space.
+- xattr, acl
+- readdir(3) in userspace.
+- keep inode number by external inode number table
+- keep the timestamps of file/dir in internal copyup operation
+- seekable directory, supporting NFS readdir.
+- whiteouts are hardlinked in order to reduce the consumption of inodes
+  on the branch
+- do not copy-up or create a whiteout when it is unnecessary
+- revert a single systemcall when an error occurs in aufs
+- remount interface instead of ioctl
+- maintain /etc/mtab by an external command, /sbin/mount.aufs.
+- loopback mounted filesystem as a branch
+- kernel thread for removing a dir which has plenty of whiteouts
+- support copy-up of sparse files (files which have a 'hole' in them)
+- default permission flags for branches
+- selectable permission flags for ro branch, whether whiteout can
+ exist or not
+- export via NFS.
+- support <sysfs>/fs/aufs and <debugfs>/aufs.
+- support multiple writable branches, some policies to select one
+ among multiple writable branches.
+- a new semantics for link(2) and rename(2) to support multiple
+ writable branches.
+- no glibc changes are required.
+- pseudo hardlink (hardlink over branches)
+- allow direct manual access to a file on a branch, i.e. bypassing aufs,
+  including NFS or remote filesystem branches.
+- userspace wrapper for pathconf(3)/fpathconf(3) with _PC_LINK_MAX.
+- and more...
+
+Currently these features are temporarily dropped from aufs4.
+See design/08plan.txt for details.
+- nested mount, i.e. aufs as readonly no-whiteout branch of another aufs
+ (robr)
+- statistics of aufs thread (/sys/fs/aufs/stat)
+
+Features or just ideas for the future (see also design/*.txt):
+- reorder the branch index without del/re-add.
+- permanent xino files for NFSD
+- an option for refreshing the opened files after add/del branches
+- light version, without branch manipulation. (unnecessary?)
+- copyup in userspace
+- inotify in userspace
+- readv/writev
+
+
+2. Download
+----------------------------------------
+There are three GIT trees for aufs4, aufs4-linux.git,
+aufs4-standalone.git, and aufs-util.git. Note that there is no "4" in
+"aufs-util.git."
+While aufs-util is always necessary, you need either aufs4-linux
+or aufs4-standalone.
+
+The aufs4-linux tree includes the whole linux mainline GIT tree,
+git://git.kernel.org/.../torvalds/linux.git.
+And you cannot select CONFIG_AUFS_FS=m for this version, i.e. you cannot
+build aufs4 as an external kernel module.
+Several extra patches are not included in this tree. Only
+aufs4-standalone tree contains them. They are described in the later
+section "Configuration and Compilation."
+
+On the other hand, the aufs4-standalone tree has only aufs source files
+and necessary patches, and you can select CONFIG_AUFS_FS=m.
+But you need to apply all aufs patches manually.
+
+You will find GIT branches whose names are in the form "aufs4.x" where "x"
+represents the linux kernel version, "linux-4.x". For instance,
+"aufs4.0" is for linux-4.0. For latest "linux-4.x-rcN", use
+"aufs4.x-rcN" branch.
+
+o aufs4-linux tree
+$ git clone --reference /your/linux/git/tree \
+ git://github.com/sfjro/aufs4-linux.git aufs4-linux.git
+- if you don't have linux GIT tree, then remove "--reference ..."
+$ cd aufs4-linux.git
+$ git checkout origin/aufs4.0
+
+Or you may want to directly git-pull aufs into your linux GIT tree, and
+leave the patch-work to GIT.
+$ cd /your/linux/git/tree
+$ git remote add aufs4 git://github.com/sfjro/aufs4-linux.git
+$ git fetch aufs4
+$ git checkout -b my4.0 v4.0
+$ (add your local change...)
+$ git pull aufs4 aufs4.0
+- now you have v4.0 + your_changes + aufs4.0 in your my4.0 branch.
+- you may need to solve some conflicts between your_changes and
+  aufs4.0. In this case, git-rerere is recommended so that you can
+  solve similar conflicts automatically when you upgrade to 4.1 or
+  later in the future.
+
+o aufs4-standalone tree
+$ git clone git://github.com/sfjro/aufs4-standalone.git aufs4-standalone.git
+$ cd aufs4-standalone.git
+$ git checkout origin/aufs4.0
+
+o aufs-util tree
+$ git clone git://git.code.sf.net/p/aufs/aufs-util aufs-util.git
+- note that the public aufs-util.git is on SourceForge instead of
+ GitHUB.
+$ cd aufs-util.git
+$ git checkout origin/aufs4.0
+
+Note: The 4.x-rcN branch is to be used with `rc' kernel versions ONLY.
+The minor version number, 'x' in '4.x', of aufs may not always
+follow the minor version number of the kernel, because changes
+in the kernel that cause the use of a new minor version number
+do not always require changes to aufs-util.
+
+Since aufs-util has its own minor version number, you may not be
+able to find a GIT branch in aufs-util for your kernel's
+exact minor version number.
+In this case, you should git-checkout the branch for the
+nearest lower number.
+
+For (an unreleased) example:
+If you are using "linux-4.10" and the "aufs4.10" branch
+does not exist in aufs-util repository, then "aufs4.9", "aufs4.8"
+or something numerically smaller is the branch for your kernel.
+
+Also you can view all branches by
+ $ git branch -a
+
+
+3. Configuration and Compilation
+----------------------------------------
+Make sure you have git-checkout'ed the correct branch.
+
+For aufs4-linux tree,
+- enable CONFIG_AUFS_FS.
+- set other aufs configurations if necessary.
+
+For aufs4-standalone tree,
+There are several ways to build.
+
+1.
+- apply ./aufs4-kbuild.patch to your kernel source files.
+- apply ./aufs4-base.patch too.
+- apply ./aufs4-mmap.patch too.
+- apply ./aufs4-standalone.patch too, if you plan to set
+  CONFIG_AUFS_FS=m. Otherwise you don't need ./aufs4-standalone.patch.
+- copy ./{Documentation,fs,include/uapi/linux/aufs_type.h} files to your
+ kernel source tree. Never copy $PWD/include/uapi/linux/Kbuild.
+- enable CONFIG_AUFS_FS, you can select either
+ =m or =y.
+- and build your kernel as usual.
+- install the built kernel.
+ Note: Since linux-3.9, every filesystem module requires an alias
+ "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
+ modules.aliases file if you set CONFIG_AUFS_FS=m.
+- install the header files too by "make headers_install" to the
+  directory you specify. By default, it is $PWD/usr.
+ "make help" shows a brief note for headers_install.
+- and reboot your system.
+
+2.
+- module only (CONFIG_AUFS_FS=m).
+- apply ./aufs4-base.patch to your kernel source files.
+- apply ./aufs4-mmap.patch too.
+- apply ./aufs4-standalone.patch too.
+- build your kernel, don't forget "make headers_install", and reboot.
+- edit ./config.mk and set other aufs configurations if necessary.
+  Note: You should read $PWD/fs/aufs/Kconfig carefully, which describes
+  every aufs configuration option.
+- build the module by simple "make".
+ Note: Since linux-3.9, every filesystem module requires an alias
+ "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
+ modules.aliases file.
+- you can specify ${KDIR} make variable which points to your kernel
+ source tree.
+- install the files
+ + run "make install" to install the aufs module, or copy the built
+ $PWD/aufs.ko to /lib/modules/... and run depmod -a (or reboot simply).
+ + run "make install_headers" (instead of headers_install) to install
+ the modified aufs header file (you can specify DESTDIR which is
+ available in aufs standalone version's Makefile only), or copy
+ $PWD/usr/include/linux/aufs_type.h to /usr/include/linux or wherever
+ you like manually. By default, the target directory is $PWD/usr.
+- no need to apply aufs4-kbuild.patch, nor copying source files to your
+ kernel source tree.
+
+Note: The header file aufs_type.h is necessary to build aufs-util,
+ as is running "make headers_install" in the kernel source tree.
+ headers_install is easily forgotten, but it is essentially
+ necessary, not only for building aufs-util.
+ You may not hit problems without headers_install with some older
+ versions, though.
+
+And then,
+- read README in aufs-util, build and install it
+- note that your distribution may contain an obsolete version of
+  aufs_type.h in /usr/include/linux or somewhere similar. When you build
+  aufs utilities, make sure that your compiler refers to the correct aufs
+  header file which is built by "make headers_install."
+- if you want to use readdir(3) in userspace or the pathconf(3) wrapper,
+  then run "make install_ulib" too. And refer to the aufs manual for
+  details.
+
+There are several other patches in aufs4-standalone.git. They are all
+optional. When you run into problems, they may help you.
+- aufs4-loopback.patch
+ Supports a nested loopback mount in a branch-fs. This patch is
+ unnecessary until aufs produces a message like "you may want to try
+ another patch for loopback file".
+- vfs-ino.patch
+ Modifies a system global kernel internal function get_next_ino() in
+ order to stop assigning 0 for an inode-number. Not directly related to
+ aufs, but recommended generally.
+- tmpfs-idr.patch
+ Keeps the tmpfs inode number as the lowest value. Effective to reduce
+ the size of aufs XINO files for tmpfs branch. Also it prevents the
+ duplication of inode number, which is important for backup tools and
+ other utilities. When you find aufs XINO files for tmpfs branch
+ growing too much, try this patch.
+- lockdep-debug.patch
+ Because aufs is not only an ordinary filesystem (callee of VFS), but
+ also a caller of VFS functions for branch filesystems, subclassing of
+ the internal locks for LOCKDEP is necessary. LOCKDEP is a debugging
+ feature of the linux kernel. If you enable CONFIG_LOCKDEP, then you
+ will need to apply this debug patch to expand several constant values.
+ If you don't know what LOCKDEP is, then you don't have to apply this
+ patch.
+
+
+4. Usage
+----------------------------------------
+First, make sure aufs-util is installed, and please read the aufs
+manual, aufs.5 in the aufs-util.git tree.
+$ man -l aufs.5
+
+And then,
+$ mkdir /tmp/rw /tmp/aufs
+# mount -t aufs -o br=/tmp/rw:${HOME} none /tmp/aufs
+
+Here is another example. The result is equivalent.
+# mount -t aufs -o br=/tmp/rw=rw:${HOME}=ro none /tmp/aufs
+ Or
+# mount -t aufs -o br:/tmp/rw none /tmp/aufs
+# mount -o remount,append:${HOME} /tmp/aufs
+
+Then, you can see the whole tree of your home dir through /tmp/aufs. If
+you modify a file under /tmp/aufs, the one in your home directory is
+not affected; instead a file with the same name will be newly created
+under /tmp/rw. And all of your modifications to the file will be applied
+to the one under /tmp/rw. This is called the file-based Copy-on-Write
+(COW) method.
+Aufs mount options are described in aufs.5.
+If you run chroot or something similar and make aufs your root
+directory, then you need to customize the shutdown script. See the aufs
+manual for details.
+
+Additionally, there are some sample usages of aufs, such as a
+diskless system with network booting, and a LiveCD over NFS.
+See the sample dir in the CVS tree on SourceForge.
+
+
+5. Contact
+----------------------------------------
+When you have any problems or see strange behaviour in aufs, please let
+me know, along with:
+- /proc/mounts (instead of the output of mount(8))
+- /sys/module/aufs/*
+- /sys/fs/aufs/* (if you have them)
+- /debug/aufs/* (if you have them)
+- linux kernel version
+ if your kernel is not plain, for example modified by a distributor,
+ the URL where I can download its source is necessary too.
+- the aufs version, which is printed when loading the module or booting
+ the system, instead of the date you downloaded it.
+- configuration (define/undefine CONFIG_AUFS_xxx)
+- kernel configuration or /proc/config.gz (if you have it)
+- the behaviour which you think is incorrect
+- actual operation, reproducible one is better
+- mailto: aufs-users at lists.sourceforge.net
+
+Usually, I don't watch the Public Areas (Bugs, Support Requests, Patches,
+and Feature Requests) on SourceForge. Please join and write to the
+aufs-users ML.
+
+
+6. Acknowledgements
+----------------------------------------
+Thanks to everyone who has tried and is using aufs, and to everyone
+who has reported a bug or given any feedback.
+
+Especially donators:
+Tomas Matejicek (slax.org) made a donation (much more than once).
+ Since Apr 2010, Tomas M (the author of Slax and Linux Live
+ scripts) has been making "doubling" donations.
+ Unfortunately I cannot list all of the donors, but I really
+ appreciate them.
+ It ended in Aug 2010, but the ordinary donation URL is still available.
+ <http://sourceforge.net/donate/index.php?group_id=167503>
+Dai Itasaka made a donation (2007/8).
+Chuck Smith made a donation (2008/4, 10 and 12).
+Henk Schoneveld made a donation (2008/9).
+Chih-Wei Huang, ASUS, CTC donated Eee PC 4G (2008/10).
+Francois Dupoux made a donation (2008/11).
+Bruno Cesar Ribas and Luis Carlos Erpen de Bona, C3SL serves public
+ aufs2 GIT tree (2009/2).
+William Grant made a donation (2009/3).
+Patrick Lane made a donation (2009/4).
+The Mail Archive (mail-archive.com) made donations (2009/5).
+Nippy Networks (Ed Wildgoose) made a donation (2009/7).
+New Dream Network, LLC (www.dreamhost.com) made a donation (2009/11).
+Pavel Pronskiy made a donation (2011/2).
+Iridium and Inmarsat satellite phone retailer (www.mailasail.com), Nippy
+ Networks (Ed Wildgoose) made a donation for hardware (2011/3).
+Max Lekomcev (DOM-TV project) made a donation (2011/7, 12, 2012/3, 6 and
+11).
+Sam Liddicott made a donation (2011/9).
+Era Scarecrow made a donation (2013/4).
+Bor Ratajc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+POIRETTE Marc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+lauri kasvandik made a donation (2013/5).
+"pemasu from Finland" made a donation (2013/7).
+The Parted Magic Project made a donation (2013/9 and 11).
+Pavel Barta made a donation (2013/10).
+Nikolay Pertsev made a donation (2014/5).
+James B made a donation (2014/7 and 2015/7).
+Stefano Di Biase made a donation (2014/8).
+Daniel Epellei made a donation (2015/1).
+OmegaPhil made a donation (2016/1, 2018/4).
+Tomasz Szewczyk made a donation (2016/4).
+James Burry made a donation (2016/12).
+Carsten Rose made a donation (2018/9).
+Porteus Kiosk made a donation (2018/10).
+
+Thank you very much.
+Donations, including future ones, are always very important and
+helpful for me to keep on developing aufs.
+
+
+7.
+----------------------------------------
+If you are an experienced user, no explanation is needed. Aufs is
+just a linux filesystem.
+
+
+Enjoy!
+
+# Local variables: ;
+# mode: text;
+# End: ;
diff --git a/Documentation/filesystems/aufs/design/01intro.txt b/Documentation/filesystems/aufs/design/01intro.txt
new file mode 100644
index 000000000000..609bf0e9b93a
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/01intro.txt
@@ -0,0 +1,171 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Introduction
+----------------------------------------
+
+aufs [ei ju: ef es] | /ey-yoo-ef-es/ | [a u f s]
+1. abbrev. for "advanced multi-layered unification filesystem".
+2. abbrev. for "another unionfs".
+3. abbrev. for "auf das" in German which means "on the" in English.
+ Ex. "Butter aufs Brot"(G) means "butter onto bread"(E).
+ But "Filesystem aufs Filesystem" is hard to understand.
+4. abbrev. for "African Urban Fashion Show".
+
+AUFS is a filesystem with the following features:
+- multi-layered stackable unification filesystem, where a member
+  directory is called a branch.
+- branch permission and attribute, 'readonly', 'real-readonly',
+ 'readwrite', 'whiteout-able', 'link-able whiteout', etc. and their
+ combination.
+- internal "file copy-on-write".
+- logical deletion, whiteout.
+- dynamic branch manipulation, adding, deleting and changing permission.
+- allow bypassing aufs, user's direct branch access.
+- external inode number translation table and bitmap which maintains the
+ persistent aufs inode number.
+- seekable directory, including NFS readdir.
+- file mapping, mmap and sharing pages.
+- pseudo-link, hardlink over branches.
+- loopback mounted filesystem as a branch.
+- several policies to select one among multiple writable branches.
+- revert a single systemcall when an error occurs in aufs.
+- and more...
+
+
+Multi Layered Stackable Unification Filesystem
+----------------------------------------------------------------------
+Most people already know what it is.
+It is a filesystem which unifies several directories and provides a
+merged single directory. When users access a file, the access is
+passed/redirected to the real file on the member filesystem. The member
+filesystem is called a 'lower filesystem' or 'branch' and has a mode of
+'readonly' or 'readwrite.' The deletion of a file on the lower
+readonly branch is handled by creating a 'whiteout' on the upper writable
+branch.
+
+On LKML, there have been discussions about UnionMount (Jan Blunck,
+Bharata B Rao and Valerie Aurora) and Unionfs (Erez Zadok). They took
+different approaches to implement the merged-view.
+The former tries putting it into VFS, and the latter implements it as a
+separate filesystem.
+(If I have misunderstood these implementations, please let me know and
+I shall correct it; it has been a long time since I last read their
+source files.)
+
+UnionMount's approach may be able to stay small, but it may be hard to
+share branches between several UnionMount instances since its whiteout
+is implemented in the inode on the branch filesystem and is always
+shared. According to Bharata's post, readdir does not seem to be
+finished yet.
+There are several missing features known in this implementation, such as
+- for users, the inode number may change silently. eg. copy-up.
+- link(2) may break by copy-up.
+- read(2) may get obsolete file data (fstat(2) too).
+- fcntl(F_SETLK) may be broken by copy-up.
+- unnecessary copy-up may happen, for example mmap(MAP_PRIVATE) after
+ open(O_RDWR).
+
+In linux-3.18, the "overlay" filesystem (formerly known as "overlayfs")
+was merged into mainline. This is another implementation of UnionMount
+as a separate filesystem. All the limitations and known problems of
+UnionMount are equally inherited by the "overlay" filesystem.
+
+Unionfs has a longer history. When I started implementing a stackable
+filesystem (Aug 2005), it already existed. It has virtual super_block,
+inode, dentry and file objects, and they have an array pointing to the
+lower objects of the same kind. After contributing many patches for
+Unionfs, I re-started my project AUFS (Jun 2006).
+
+In AUFS, the filesystem structure resembles Unionfs's, but I
+implemented my own ideas, approaches and enhancements, and it became a
+totally different one.
+
+Comparing a DM snapshot and an fs-based implementation:
+- the number of bytes to be copied between devices is much smaller.
+- the type of filesystem must be one and only one.
+- the fs must be writable, no readonly fs, even for the lower original
+  device. So a compression fs will not be usable. But if we use a
+  loopback mount, we may address this issue.
+ for instance,
+ mount /cdrom/squashfs.img /sq
+ losetup /sq/ext2.img
+ losetup /somewhere/cow
+ dmsetup "snapshot /dev/loop0 /dev/loop1 ..."
+- it will be difficult (or needs more operations) to extract the
+ difference between the original device and COW.
+- DM snapshot-merge may help a lot when users try merging. In the
+  fs-layer union, users will use rsync(1).
+
+You may want to read my old paper "Filesystems in LiveCD"
+(http://aufs.sourceforge.net/aufs2/report/sq/sq.pdf).
+
+
+Several characters/aspects/persona of aufs
+----------------------------------------------------------------------
+
+Aufs has several characters, aspects or personas.
+1. a filesystem, callee of VFS helpers
+2. sub-VFS, caller of VFS helpers for branches
+3. a virtual filesystem which maintains persistent inode numbers
+4. reader/writer of files on branches, much like an application
+
+1. Callee of VFS Helper
+As an ordinary linux filesystem, aufs is a callee of VFS. For instance,
+unlink(2) from an application reaches the sys_unlink() kernel function
+and then vfs_unlink() is called. vfs_unlink() is one of the VFS helpers
+and it calls the filesystem-specific unlink operation. Aufs implements
+the unlink operation, but it behaves like a redirector.
+
+2. Caller of VFS Helper for Branches
+aufs_unlink() passes the unlink request to the branch filesystem as if
+it were called from VFS. So the called unlink operation of the branch
+filesystem acts as usual. As a caller of VFS helpers, aufs has to handle
+every necessary pre/post operation for the branch filesystem (a rough
+sketch of these steps follows the list below).
+- acquire the lock for the parent dir on a branch
+- lookup in a branch
+- revalidate dentry on a branch
+- mnt_want_write() for a branch
+- vfs_unlink() for a branch
+- mnt_drop_write() for a branch
+- release the lock on a branch
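+
+The following is a minimal, hypothetical sketch of that sequence for
+unlink, assuming roughly the linux-4.x/5.x VFS API (it is only an
+illustration, not aufs's real code, and it omits the d_revalidate step
+and most error handling):
+
+    /* hypothetical: unlink one name on a single branch */
+    static int branch_unlink(struct vfsmount *h_mnt,
+                             struct dentry *h_parent, const char *name)
+    {
+        struct inode *h_dir = d_inode(h_parent);
+        struct dentry *h_dentry;
+        int err;
+
+        /* acquire the lock for the parent dir on the branch */
+        inode_lock_nested(h_dir, I_MUTEX_PARENT);
+        h_dentry = lookup_one_len(name, h_parent, strlen(name));
+        err = PTR_ERR(h_dentry);
+        if (IS_ERR(h_dentry))
+            goto out_unlock;
+
+        /* get write access to the branch mount */
+        err = mnt_want_write(h_mnt);
+        if (!err) {
+            err = vfs_unlink(h_dir, h_dentry, /*delegated_inode=*/NULL);
+            mnt_drop_write(h_mnt);
+        }
+        dput(h_dentry);
+    out_unlock:
+        inode_unlock(h_dir);    /* release the lock on the branch */
+        return err;
+    }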
+
+3. Persistent Inode Number
+One of the most important issues for a filesystem is to maintain inode
+numbers. This is particularly important to support exporting a
+filesystem via NFS. Aufs is a virtual filesystem which doesn't have a
+backend block device of its own, but some storage is necessary to
+keep and maintain the inode numbers. It may be a large space and may not
+be suitable to keep in memory. Aufs rents some space from its first
+writable branch filesystem (by default) and creates file(s) on it. These
+files are created by aufs internally and removed soon afterwards
+(currently), while being kept open.
+Note: Because these files are removed, they are totally gone after
+ unmounting aufs. It means the inode numbers are not persistent
+ across unmount or reboot. I have a plan to make them really
+ persistent which will be important for aufs on NFS server.
+
+4. Read/Write Files Internally (copy-on-write)
+Because a branch can be readonly, when you write to a file on it, aufs
+will internally "copy it up" to the upper writable branch, and then
+write the originally requested data to that file. Generally the kernel
+doesn't open/read/write files actively. In aufs, even a single write may
+cause an internal "file copy". This behaviour is very similar to the
+cp(1) command.
+
+Some people may think it is better to pass such work to a userspace
+helper, instead of doing it in kernel space. Actually I am still
+thinking about it. But currently I have implemented it in kernel space.
diff --git a/Documentation/filesystems/aufs/design/02struct.txt b/Documentation/filesystems/aufs/design/02struct.txt
new file mode 100644
index 000000000000..2467788c7f10
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/02struct.txt
@@ -0,0 +1,258 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Basic Aufs Internal Structure
+
+Superblock/Inode/Dentry/File Objects
+----------------------------------------------------------------------
+Like an ordinary filesystem, aufs has its own
+superblock/inode/dentry/file objects. All these objects have a
+dynamically allocated array which stores pointers to the corresponding
+objects on the lower filesystems, the branches.
+For example, when you build a union with one readwrite branch and one
+readonly branch, mounted on /au, /rw and /ro respectively:
+- /au = /rw + /ro
+- /ro/fileA exists but /rw/fileA does not
+
+The aufs lookup operation finds /ro/fileA and gets a dentry for it. The
+pointer is stored in the aufs dentry. The array in the aufs dentry will
+be:
+- [0] = NULL (because /rw/fileA doesn't exist)
+- [1] = /ro/fileA
+
+This style of array is essentially the same across the aufs
+superblock/inode/dentry/file objects.
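+
+A simplified sketch of such a per-dentry array (an illustration of the
+idea only; the real definitions live in fs/aufs/dentry.h and carry more
+fields plus locking):
+
+    /* hypothetical, simplified per-branch slot and dentry info */
+    struct hd_slot {
+        struct dentry *hd_dentry;   /* dentry on that branch, or NULL */
+    };
+
+    struct dinfo {
+        int di_btop;                /* index of the topmost valid branch */
+        int di_bbot;                /* index of the lowest valid branch */
+        struct hd_slot *di_hdentry; /* [0] = upper ... [N] = lower */
+    };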
+
+Because aufs supports manipulating branches, i.e. adding/deleting/changing
+branches dynamically, these objects have their own generation number. When
+branches are changed, the generation in the aufs superblock is
+incremented, and the generation in each other object is compared against
+it when that object is accessed. When the generation in another object is
+obsolete, aufs refreshes its internal array.
+
+
+Superblock
+----------------------------------------------------------------------
+Additionally the aufs superblock has some data for the policies to select
+one among multiple writable branches, XIB files, pseudo-links and a
+kobject. See below for details.
+About the policies which support copying down a directory, see
+wbr_policy.txt too.
+
+
+Branch and XINO(External Inode Number Translation Table)
+----------------------------------------------------------------------
+Every branch has its own xino (external inode number translation table)
+file. The xino file is created and unlinked by aufs internally. When two
+members of a union exist on the same filesystem, they share a single
+xino file.
+The structure of a xino file is simple: just a sequence of aufs inode
+numbers indexed by the lower inode number.
+In the above example, assume the inode number of /ro/fileA is i111 and
+aufs assigns the inode number i999 to fileA. Then aufs writes 999 as a
+4(8)-byte value at offset 111 * 4(8) bytes in the xino file.
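+
+In other words, the on-disk format is a plain array indexed by the
+branch inode number. A hypothetical sketch of the translation, written
+in userspace style (pread(2)) purely for brevity; aufs actually does
+this with its own in-kernel file I/O helpers:
+
+    /* illustration only: look up the aufs inode number for h_ino */
+    static ino_t xino_read(int xino_fd, ino_t h_ino)
+    {
+        ino_t ino = 0;                       /* 0 means "not assigned" */
+        off_t pos = h_ino * sizeof(ino_t);   /* the 4(8) bytes above */
+
+        pread(xino_fd, &ino, sizeof(ino), pos);
+        return ino;
+    }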
+
+When the inode numbers are not contiguous, the xino file will be sparse,
+i.e. have holes in it, and doesn't consume as much disk space as it
+might appear to. If your branch filesystem consumes disk space for such
+holes, then you should specify the 'xino=' option when mounting aufs.
+
+Aufs has a mount option to free the disk blocks for such holes in XINO
+files on tmpfs or a ramdisk, but it is actually not so effective. If you
+meet a problem of disk shortage due to XINO files, then you should try
+"tmpfs-ino.patch" (and "vfs-ino.patch" too) in aufs4-standalone.git.
+The patch localizes the inode number assignment per tmpfs mount and
+avoids the holes in XINO files.
+
+Also a writable branch has three kinds of "whiteout bases". All of these
+exist when the branch is joined to aufs, and their names are
+doubly whiteout-ed so that users will never see their names in the aufs
+hierarchy.
+1. a regular file which will be hardlinked to all whiteouts.
+2. a directory to store pseudo-links.
+3. a directory to store an "orphan"-ed file temporarily.
+
+1. Whiteout Base
+ When you remove a file on a readonly branch, aufs handles it as a
+ logical deletion and creates a whiteout on the upper writable branch
+ as a hardlink of this file in order not to consume inode on the
+ writable branch.
+2. Pseudo-link Dir
+ See below, Pseudo-link.
+3. Step-Parent Dir
+   When "fileC" exists only on the lower readonly branch and it is
+   opened and removed together with its parent dir, and then the user
+   writes something into it, aufs copies up fileC to this directory,
+   because there is no other dir to store fileC in. After
+   creating the file under this dir, the file is unlinked.
+
+Because aufs supports manipulating branches, i.e. adding/deleting/changing
+them dynamically, a branch has its own id. When the branch order changes,
+aufs finds the new index by searching for the branch id.
+
+
+Pseudo-link
+----------------------------------------------------------------------
+Assume "fileA" exists on the lower readonly branch only and it is
+hardlinked to "fileB" on the branch. When you write something to fileA,
+aufs copies-up it to the upper writable branch. Additionally aufs
+creates a hardlink under the Pseudo-link Directory of the writable
+branch. The inode of a pseudo-link is kept in aufs super_block as a
+simple list. If fileB is read after unlinking fileA, aufs returns
+filedata from the pseudo-link instead of the lower readonly
+branch. Because the pseudo-link is based upon the inode, to keep the
+inode number by xino (see above) is essentially necessary.
+
+All the hardlinks under the Pseudo-link Directory of the writable branch
+should be restored to a proper location later. Aufs provides a utility
+to do this. The userspace helper is executed at remounting and unmounting
+aufs by default.
+While this utility is running, it puts aufs into the pseudo-link
+maintenance mode. In this mode, only the process which began the
+maintenance mode (and its child processes) is allowed to operate in
+aufs. Some other processes which are not related to the pseudo-link will
+be allowed to run too, but the rest have to return an error or wait
+until the maintenance mode ends. If a process has already acquired an
+inode mutex (in VFS), it has to return an error.
+
+
+XIB(external inode number bitmap)
+----------------------------------------------------------------------
+In addition to the per-branch xino files, aufs has an external inode
+number bitmap in the superblock object. It is also an internal file, like
+a xino file.
+It is a simple bitmap which marks whether an aufs inode number is in use
+or not.
+To reduce file I/O, aufs prepares a single memory page to cache the xib.
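+
+Conceptually, allocating a new aufs inode number is just a search in
+that cached bitmap page. A hypothetical sketch using the generic kernel
+bitmap helpers (not aufs's actual xib code, which also pages the bitmap
+in and out of the file):
+
+    /* illustration only: pick a free aufs inode number from one page */
+    static unsigned long xib_alloc(unsigned long *xib_page,
+                                   unsigned long nbits)
+    {
+        unsigned long ino;
+
+        ino = find_first_zero_bit(xib_page, nbits);
+        if (ino < nbits)
+            set_bit(ino, xib_page); /* mark it as in use */
+        return ino;                 /* ino == nbits means "page is full" */
+    }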
+
+As well as XINO files, aufs has a feature to truncate/refresh XIB to
+reduce the number of consumed disk blocks for these files.
+
+
+Virtual or Vertical Dir, and Readdir in Userspace
+----------------------------------------------------------------------
+In order to support multiple layers (branches), the aufs readdir
+operation constructs a virtual dir block in memory. For readdir, aufs
+calls vfs_readdir() internally for each dir on the branches, merges
+their entries while eliminating the whiteout-ed ones, and attaches the
+result to the file (dir) object. So the file object holds its entry list
+until it is closed. The entry list will be updated when the file
+position is zero and the list has become obsolete. This decision is made
+automatically in aufs.
+
+The dynamically allocated memory block for the names of entries has a
+unit of 512 bytes (by default) and stores the names contiguously (no
+padding). Another block for each entry is handled by kmem_cache too.
+While building dir blocks, aufs creates a hash list and judges whether
+each entry is whiteouted by an upper branch or is already listed.
+The merged result is cached in the corresponding inode object and
+maintained by a customizable life-time option.
+
+Some people may say this can be a security hole or invite a DoS attack,
+since an opened and once readdir-ed dir (file object) holds its entry
+list and puts pressure on system memory. But I'd say it is similar
+to files under /proc or /sys. The virtual files there also hold a
+memory page (generally) while they are opened. When an idea to reduce
+memory for them is introduced, it will be applied to aufs too.
+For those who really hate this situation, I've developed a readdir(3)
+library which performs this merging in userspace. You just need to set
+the LD_PRELOAD environment variable, and aufs will not consume any
+memory in kernel space for readdir(3).
+
+
+Workqueue
+----------------------------------------------------------------------
+Aufs sometimes requires privileged access to a branch, for instance
+in copy-up/down operations. When a user process is going to make changes
+to a file which exists only on the lower readonly branch, the mode
+of one of its ancestor directories may not be writable by the user
+process. Here aufs copies up the file with its ancestors, and they may
+require privilege to set their owner/group/mode/etc.
+This is a typical case of the application character of aufs (see
+Introduction).
+
+Aufs uses a workqueue synchronously for this case. It creates its own
+workqueue. The workqueue is a kernel thread and has privilege. Aufs
+passes it the request to call mkdir or write (for example), and waits
+for its completion. This approach simply avoids the problem of signal
+handlers.
+If aufs did not adopt the workqueue and instead changed the privilege of
+the process, then the process might receive an unexpected SIGXFSZ or
+other signals.
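+
+A minimal sketch of this "queue the work and wait" pattern, using the
+ordinary linux workqueue and completion APIs (the workqueue itself
+would be created once with alloc_workqueue()); this only illustrates
+the idea and is not aufs's real wkq.c:
+
+    struct wk_req {
+        struct work_struct work;
+        struct completion done;
+        int err;                /* result of the privileged operation */
+    };
+
+    static void wk_func(struct work_struct *work)
+    {
+        struct wk_req *req = container_of(work, struct wk_req, work);
+
+        /* runs in the kernel thread, i.e. with full privilege */
+        req->err = 0;           /* ... vfs_mkdir(), notify_change(), ... */
+        complete(&req->done);
+    }
+
+    static int wk_and_wait(struct workqueue_struct *wq)
+    {
+        struct wk_req req = { .err = 0 };
+
+        INIT_WORK_ONSTACK(&req.work, wk_func);
+        init_completion(&req.done);
+        queue_work(wq, &req.work);
+        wait_for_completion(&req.done); /* synchronous for the caller */
+        destroy_work_on_stack(&req.work);
+        return req.err;
+    }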
+
+Aufs also uses the system global workqueue ("events" kernel thread) for
+asynchronous tasks, such as handling inotify/fsnotify, re-creating a
+whiteout base, etc. This is unrelated to privilege.
+Most aufs operations try to acquire a rw_semaphore for the aufs
+superblock at the beginning, and at the same time wait for the
+completion of all queued asynchronous tasks.
+
+
+Whiteout
+----------------------------------------------------------------------
+The whiteout in aufs is very similar to Unionfs's. It is represented
+by its filename. UnionMount takes a file-mode approach instead, but I am
+afraid several utilities (find(1) or the like) would have to support it.
+
+Basically the whiteout represents "logical deletion", which stops aufs
+from looking up further, but it can also represent "dir is opaque", which
+also stops further lookup.
+
+In aufs, rmdir(2) and rename(2) for a dir make use of whiteouts in
+another way.
+In order to make the several steps of a single systemcall revertible,
+aufs adopts an approach of renaming a directory to a temporary, unique,
+whiteouted name.
+For example, in rename(2) of a dir where the target dir already exists,
+aufs renames the target dir to a temporary unique whiteouted name before
+the actual rename on a branch, and then handles the other actions (make
+it opaque, update the attributes, etc). If an error happens in these
+actions, aufs simply renames the whiteouted name back and returns an
+error. If all succeed, aufs registers a function with the system global
+workqueue to remove the whiteouted unique temporary name completely and
+asynchronously.
+
+
+Copy-up
+----------------------------------------------------------------------
+It is a well-known feature or concept.
+When a user modifies a file on a readonly branch, aufs performs "copy-up"
+internally and makes the change to the new file on the upper writable
+branch.
+When the triggering systemcall does not update the timestamps of the
+parent dir, aufs reverts them after the copy-up.
+
+
+Move-down (aufs3.9 and later)
+----------------------------------------------------------------------
+"Copy-up" is one of the essential feature in aufs. It copies a file from
+the lower readonly branch to the upper writable branch when a user
+changes something about the file.
+"Move-down" is an opposite action of copy-up. Basically this action is
+ran manually instead of automatically and internally.
+For desgin and implementation, aufs has to consider these issues.
+- whiteout for the file may exist on the lower branch.
+- ancestor directories may not exist on the lower branch.
+- diropq for the ancestor directories may exist on the upper branch.
+- free space on the lower branch will reduce.
+- another access to the file may happen during moving-down, including
+ UDBA (see "Revalidate Dentry and UDBA").
+- the file should not be hard-linked nor pseudo-linked. they should be
+ handled by auplink utility later.
+
+Sometimes users want to move-down a file from the upper writable branch
+to the lower readonly or writable branch. For instance,
+- the free space of the upper writable branch is going to run out.
+- create a new intermediate branch between the upper and lower branch.
+- etc.
+
+For this purpose, use "aumvdown" command in aufs-util.git.
diff --git a/Documentation/filesystems/aufs/design/03atomic_open.txt b/Documentation/filesystems/aufs/design/03atomic_open.txt
new file mode 100644
index 000000000000..9f16ac1e0173
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/03atomic_open.txt
@@ -0,0 +1,85 @@
+
+# Copyright (C) 2015-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Support for a branch which has its own ->atomic_open()
+----------------------------------------------------------------------
+The filesystems which implement their own ->atomic_open() are not the
+majority. For example NFSv4 does, and aufs should call NFSv4's
+->atomic_open(), particularly for the open(O_CREAT|O_EXCL, 0400) case.
+Without ->atomic_open(), NFSv4 returns an error for this open(2). I am
+not sure whether all filesystems which have ->atomic_open() behave like
+this, but NFSv4 surely returns the error.
+
+In order to support ->atomic_open() for aufs, there are a few
+approaches.
+
+A. Introduce aufs_atomic_open()
+ - calls one of VFS:do_last(), lookup_open() or atomic_open() for
+ branch fs.
+B. Introduce aufs_atomic_open() calling create, open and chmod. This is
+   the approach of Pip Cet, an aufs user.
+ - calls aufs_create(), VFS finish_open() and notify_change().
+ - pass fake-mode to finish_open(), and then correct the mode by
+ notify_change().
+C. Extend aufs_open() to call branch fs's ->atomic_open()
+ - no aufs_atomic_open().
+ - aufs_lookup() registers the TID to an aufs internal object.
+ - aufs_create() does nothing when the matching TID is registered, but
+ registers the mode.
+ - aufs_open() calls branch fs's ->atomic_open() when the matching
+ TID is registered.
+D. Extend aufs_open() to re-try branch fs's ->open() with superuser's
+ credential
+ - no aufs_atomic_open().
+ - aufs_create() registers the TID to an internal object. this info
+ represents "this process created this file just now."
+ - when aufs gets EACCES from branch fs's ->open(), then confirm the
+ registered TID and re-try open() with superuser's credential.
+
+Pros and cons for each approach.
+
+A.
+ - straightforward but highly depends upon VFS internal.
+  - the atomic behaviour is kept.
+ - some of parameters such as nameidata are hard to reproduce for
+ branch fs.
+ - large overhead.
+B.
+ - easy to implement.
+  - the atomic behaviour is lost.
+C.
+  - the atomic behaviour is kept.
+ - dirty and tricky.
+ - VFS checks whether the file is created correctly after calling
+ ->create(), which means this approach doesn't work.
+D.
+ - easy to implement.
+  - the atomic behaviour is lost.
+ - to open a file with superuser's credential and give it to a user
+ process is a bad idea, since the file object keeps the credential
+ in it. It may affect LSM or something. This approach doesn't work
+ either.
+
+Approach A is ideal, but it is hard to implement. So here is a
+variation of A, which is what is actually implemented (a rough sketch
+follows the description below).
+
+A-1. Introduce aufs_atomic_open()
+   - calls the branch fs ->atomic_open() if it exists, otherwise calls
+     vfs_create() and finish_open().
+   - the demerit is that the several checks after the branch fs
+     ->atomic_open() are lost. In the ordinary case, the checks are
+     done by VFS:do_last(), lookup_open() and atomic_open(). Some can
+     be implemented in aufs, but not all, I am afraid.
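+
+A heavily simplified, hypothetical sketch of that dispatch (the
+signatures roughly follow the linux-5.x inode_operations API and differ
+between kernel versions; branch lookup, copy-up and the lost checks are
+all omitted, so this is not aufs's real aufs_atomic_open()):
+
+    static int atomic_open_sketch(struct inode *h_dir,
+                                  struct dentry *h_dentry,
+                                  struct file *file,
+                                  unsigned int open_flag, umode_t mode)
+    {
+        int err;
+
+        if (h_dir->i_op->atomic_open)
+            /* let the branch fs create and open in one step */
+            return h_dir->i_op->atomic_open(h_dir, h_dentry, file,
+                                            open_flag, mode);
+
+        /* fallback: plain create, then open */
+        err = vfs_create(h_dir, h_dentry, mode, open_flag & O_EXCL);
+        if (!err)
+            err = finish_open(file, h_dentry, NULL);
+        return err;
+    }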
diff --git a/Documentation/filesystems/aufs/design/03lookup.txt b/Documentation/filesystems/aufs/design/03lookup.txt
new file mode 100644
index 000000000000..08b443d94dfa
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/03lookup.txt
@@ -0,0 +1,113 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Lookup in a Branch
+----------------------------------------------------------------------
+Since aufs has the character of a sub-VFS (see Introduction), it performs
+lookup on branches as VFS does. It may be heavy work, but almost every
+lookup operation in aufs is the simplest case, i.e. looking up only an
+entry directly connected to its parent. Digging down the directory
+hierarchy is unnecessary. VFS has a function lookup_one_len() for that
+use, and aufs calls it.
+
+When a branch is a remote filesystem, aufs basically relies upon its
+->d_revalidate(), and aufs also forces the hardest revalidate tests for
+it.
+For d_revalidate, aufs implements three levels of revalidate tests. See
+"Revalidate Dentry and UDBA" for details.
+
+
+Test Only the Highest One for the Directory Permission (dirperm1 option)
+----------------------------------------------------------------------
+Let's try a case study.
+- aufs has two branches, upper readwrite and lower readonly.
+  /au = /rw + /ro
+- "dirA" exists under /ro, but not under /rw, and its mode is 0700.
+- a user invokes "chmod a+rx /au/dirA"
+- the internal copy-up is activated, "/rw/dirA" is created and its
+  permission bits are set to world readable.
+- does "/au/dirA" then become world readable?
+
+In this case, /ro/dirA is still 0700 since it exists on the readonly
+branch, or it may even be a natively readonly filesystem. If aufs
+respects the lower branch, it should not respond to readdir requests
+from other users. But the user allowed it by chmod. Should aufs really
+refuse to show the entries under /ro/dirA?
+
+To be honest, I don't have a good solution for this case. So aufs
+implements 'dirperm1' and 'nodirperm1' mount options, and leave it to
+users.
+When dirperm1 is specified, aufs checks only the highest one for the
+directory permission, and shows the entries. Otherwise, as usual, checks
+every dir existing on all branches and rejects the request.
+
+As a side effect, the dirperm1 option improves the performance of aufs
+because the number of permission checks is reduced when there are many
+branches.
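+
+As a hypothetical usage example (in the same style as the aufs manual),
+the option is simply appended to the branch specification:
+# mount -t aufs -o br:/rw:/ro,dirperm1 none /au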
+
+
+Revalidate Dentry and UDBA (User's Direct Branch Access)
+----------------------------------------------------------------------
+Generally VFS helpers re-validate a dentry as a part of lookup.
+0. digging down the directory hierarchy.
+1. lock the parent dir by its i_mutex.
+2. lookup the final (child) entry.
+3. revalidate it.
+4. call the actual operation (create, unlink, etc.)
+5. unlock the parent dir
+
+If the filesystem implements its own ->d_revalidate() (step 3), then it
+is called. Aufs does implement it and checks whether the dentry on a
+branch is still valid.
+But that is not enough, because aufs has to release the lock on the
+parent dir on a branch at the end of ->lookup() (step 2) and
+->d_revalidate() (step 3), while the i_mutex of the aufs dir is still
+held by VFS.
+If the file on a branch is changed directly, eg. bypassing aufs, after
+aufs has released the lock, then the subsequent operation may produce an
+unpleasant result.
+
+This situation is a result of the VFS architecture, where ->lookup() and
+->d_revalidate() are separated. I am not saying it is wrong; it is a
+good design from VFS's point of view. It is just not suitable for the
+sub-VFS character of aufs.
+
+Aufs supports such cases with three levels of revalidation, selectable
+by the user.
+1. Simple Revalidate
+   In addition to the native VFS flow, confirm the child-parent
+   relationship on the branch just after locking the parent dir on the
+   branch in the "actual operation" (step 4). When this validation
+   fails, aufs returns EBUSY. ->d_revalidate() (step 3) in aufs still
+   checks the validity of the dentry on the branches.
+2. Monitor Changes Internally by Inotify/Fsnotify
+   In addition to the above, in the "actual operation" (step 4) aufs
+   re-looks-up the dentry on the branch, and returns EBUSY if it finds a
+   different dentry.
+   Additionally, aufs sets an inotify/fsnotify watch for every dir on
+   the branches while it is in cache. When an event is notified, aufs
+   registers a function with the kernel 'events' thread via
+   schedule_work(), and the function sets a special status in the cached
+   aufs dentry and inode private data. If they are not cached, then aufs
+   has nothing to do. When the same file is accessed through aufs
+   (steps 0-3) later, aufs will detect the status and refresh all
+   necessary data.
+   In this mode, aufs has to ignore the events fired by aufs itself.
+3. No Extra Validation
+   This is the simplest mode: it doesn't add any additional revalidation
+   test and skips the revalidation in step 4. It is useful and improves
+   aufs performance when the system surely hides the aufs branches from
+   users, by over-mounting something on them (or another method).
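+
+As a reference, these three levels are selected by the 'udba' mount
+option: level 1 by udba=reval, level 2 by udba=notify and level 3 by
+udba=none (see aufs.5 for the authoritative names). A hypothetical
+example:
+# mount -t aufs -o br:/rw:/ro,udba=notify none /au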
diff --git a/Documentation/filesystems/aufs/design/04branch.txt b/Documentation/filesystems/aufs/design/04branch.txt
new file mode 100644
index 000000000000..3ab3682a42b4
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/04branch.txt
@@ -0,0 +1,74 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Branch Manipulation
+
+Since aufs supports dynamic branch manipulation, ie. adding/removing a
+branch and changing its permission/attributes, there is a lot of work to
+do.
+
+
+Add a Branch
+----------------------------------------------------------------------
+o Confirm that the directory being added exists outside of aufs
+  (including loopback mounts) and check its various attributes.
+o Initialize the xino file and whiteout bases if necessary.
+ See struct.txt.
+
+o Check the owner/group/mode of the directory
+  When the owner/group/mode of the directory being added differs from
+  the existing branches, aufs issues a warning because it may impose a
+  security risk.
+  For example, when an upper writable branch has a world-writable empty
+  top directory, a malicious user can create any file on the writable
+  branch directly, just as copy-up does, and modify it manually. If
+  something like /etc/{passwd,shadow} exists on the lower readonly
+  branch but not on the upper writable branch, and the writable branch
+  is world-writable, then a malicious user may create /etc/passwd on the
+  writable branch directly and the forged file will be valid in aufs.
+  I am afraid it can be a security issue, but aufs can do nothing except
+  produce a warning.
+
+
+Delete a Branch
+----------------------------------------------------------------------
+o Confirm the branch being deleted is not busy
+  Generally speaking, one merit of adopting the "remount" interface to
+  manipulate branches is that it discards caches. When deleting a
+  branch, aufs checks the still cached (and connected) dentries and
+  inodes. If there are any, then they are all in use. An inode without
+  its corresponding dentry can be alive alone (for example, in the
+  inotify/fsnotify case).
+
+  For each cached entry, aufs checks whether an entry with the same name
+  exists on the other branches.
+  If the cached entry is a directory, then because aufs provides a
+  merged view to users, as long as one dir is left on any branch aufs
+  can still show the dir to users. In this case, the branch can be
+  removed from aufs. Otherwise aufs rejects deleting the branch.
+
+  If any file on the branch being deleted is opened by aufs, then aufs
+  rejects the deletion.
+
+
+Modify the Permission of a Branch
+----------------------------------------------------------------------
+o Re-initialize or remove the xino file and whiteout bases if necessary.
+ See struct.txt.
+
+o rw --> ro: Confirm the branch being modified is not busy
+  Aufs rejects the request if any of these conditions is true.
+  - a file on the branch is mmap-ed.
+  - a regular file on the branch is opened for write and there is no
+    entry with the same name on the upper branch.
diff --git a/Documentation/filesystems/aufs/design/05wbr_policy.txt b/Documentation/filesystems/aufs/design/05wbr_policy.txt
new file mode 100644
index 000000000000..1f9b86bc6a3e
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/05wbr_policy.txt
@@ -0,0 +1,64 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Policies to Select One among Multiple Writable Branches
+----------------------------------------------------------------------
+When there is more than one writable branch, aufs has to decide the
+target branch for file creation or copy-up. By default, the highest
+writable branch which has the parent (or an ancestor) dir of the target
+file is chosen (the top-down-parent policy).
+By user's request, aufs implements several other policies to select the
+writable branch: for file creation, round-robin, most-free-space and
+others; for copy-up, top-down-parent, bottom-up-parent, bottom-up and
+others.
+
+As expected, the round-robin policy selects the branches in a circular
+manner. When you have two writable branches and create 10 new files, 5
+files will be created on each branch. The mkdir(2) system call is an
+exception: when you create 10 new directories, all will be created on
+the same branch.
+The most-free-space policy selects the branch which has the most free
+space among the writable branches. The amount of free space is checked
+by aufs internally, and users can specify the checking interval.
+
+The policies for copy-up are simpler:
+top-down-parent is equivalent to the same-named one among the create
+policies;
+bottom-up-parent selects the writable branch where the parent dir
+exists and which is the nearest upper one from the copyup-source;
+bottom-up selects the nearest upper writable branch from the
+copyup-source, regardless of the existence of the parent dir.
+
+There are some rules or exceptions in applying these policies.
+- If there is a readonly branch above the policy-selected branch and
+  the parent dir is marked as opaque (a variation of whiteout), or the
+  target (to-be-created) file is whiteout-ed on the upper readonly
+  branch, then the result of the policy is ignored and the target file
+  will be created on the nearest writable branch above the readonly
+  branch.
+- If there is a writable branch above the policy-selected branch and
+  the parent dir is marked as opaque or the target file is whiteout-ed
+  on that branch, then the result of the policy is ignored and the
+  target file will be created on the highest one among the upper
+  writable branches which has the diropq or whiteout. In the whiteout
+  case, aufs removes it as usual.
+- The link(2) and rename(2) system calls are exceptions in every policy.
+  They try to select the branch where the source exists whenever
+  possible, since copying up a large file takes a long time. If that is
+  impossible, ie. the branch where the source exists is readonly, then
+  they follow the copyup policy.
+- There is an exception for rename(2) when the target exists.
+  If the rename target exists, aufs compares the indices of the branches
+  where the source and the target exist and selects the higher one. If
+  the selected branch is readonly, then aufs follows the copyup policy.
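+
+As a hypothetical usage example (see aufs.5 for the authoritative option
+names), the creation and copy-up policies are selected by mount options
+such as:
+# mount -t aufs -o br:/rw0:/rw1:/ro,create=mfs,cpup=bup none /u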
diff --git a/Documentation/filesystems/aufs/design/06dirren.dot b/Documentation/filesystems/aufs/design/06dirren.dot
new file mode 100644
index 000000000000..2d62bb6dd55f
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06dirren.dot
@@ -0,0 +1,31 @@
+
+// to view this graph, run dot(1) command in GRAPHVIZ.
+
+digraph G {
+node [shape=box];
+whinfo [label="detailed info file\n(lower_brid_root-hinum, h_inum, namelen, old name)"];
+
+node [shape=oval];
+
+aufs_rename -> whinfo [label="store/remove"];
+
+node [shape=oval];
+inode_list [label="h_inum list in branch\ncache"];
+
+node [shape=box];
+whinode [label="h_inum list file"];
+
+node [shape=oval];
+brmgmt [label="br_add/del/mod/umount"];
+
+brmgmt -> inode_list [label="create/remove"];
+brmgmt -> whinode [label="load/store"];
+
+inode_list -> whinode [style=dashed,dir=both];
+
+aufs_rename -> inode_list [label="add/del"];
+
+aufs_lookup -> inode_list [label="search"];
+
+aufs_lookup -> whinfo [label="load/remove"];
+}
diff --git a/Documentation/filesystems/aufs/design/06dirren.txt b/Documentation/filesystems/aufs/design/06dirren.txt
new file mode 100644
index 000000000000..3b80f8659b3e
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06dirren.txt
@@ -0,0 +1,102 @@
+
+# Copyright (C) 2017-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Special handling for renaming a directory (DIRREN)
+----------------------------------------------------------------------
+First, let's assume we have a simple use case.
+
+- /u = /rw + /ro
+- /rw/dirA exists
+- /ro/dirA and /ro/dirA/file exist too
+- there is no dirB on either branch
+- a user issues rename("dirA", "dirB")
+
+Now, how should aufs behave for this rename(2)?
+There are a few possible cases.
+
+A. returns EROFS.
+   since dirA exists on a readonly branch, it cannot be renamed.
+B. returns EXDEV.
+   it is possible to copy-up dirA (only the dir itself), but the child
+   entries ("file" in this case) should not be copied up. Copying up
+   recursively would be a bad approach.
+C. returns success.
+   even though the branch /ro is readonly, aufs tries renaming it there.
+   Obviously this violates aufs' policy.
+D. constructs extra information which indicates that /ro/dirA should be
+   handled as if its name were dirB.
+   overlayfs has a similar feature called REDIRECT.
+
+Until now, aufs implemented only case B: it returns EXDEV and expects
+the userspace application to behave like mv(1), which retries by
+issuing rename(2) recursively.
+
+A new aufs feature called DIRREN is introduced which implements case D.
+Several pieces of "extra information" are added.
+
+1. detailed info per renamed directory
+ path: /rw/dirB/$AUFS_WH_DR_INFO_PFX.<lower branch-id>
+2. the inode-number list of directories on a branch
+ path: /rw/dirB/$AUFS_WH_DR_BRHINO
+
+The filename of "detailed info per directory" represents the lower
+branch, and its format is
+- a type of the branch id
+ one of these.
+ + uuid (not implemented yet)
+ + fsid
+ + dev
+- the inode-number of the branch root dir
+
+And it contains the following info in a single regular file (see below).
+- magic number
+- branch's inode-number of the logically renamed dir
+- the name of the before-renamed dir
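+
+As a rough picture only (the field order, widths and names here are
+assumptions, not the on-disk format used by aufs), the record could look
+like:
+
+	struct au_drinfo_example {	/* illustrative, not the real layout */
+		uint32_t magic;		/* magic number */
+		uint64_t ino;		/* branch inode number of the renamed dir */
+		uint8_t	 oldnamelen;	/* length of the old name */
+		char	 oldname[];	/* the name before the rename */
+	};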
+
+The "detailed info per directory" file is created in aufs rename(2), and
+loaded in any lookup.
+The info is considered in lookup for the matching case only. Here
+"matching" means that the root of branch (in the info filename) is same
+to the current looking-up branch. After looking-up the before-renamed
+name, the inode-number is compared. And the matched dentry is used.
+
+The "inode-number list of directories" is a regular file which contains
+simply the inode-numbers on the branch. The file is created or updated
+in removing the branch, and loaded in adding the branch. Its lifetime is
+equal to the branch.
+The list is refered in lookup, and when the current target inode is
+found in the list, the aufs tries loading the "detailed info per
+directory" and get the changed and valid name of the dir.
+
+Theoretically this "extra information" could be put into XATTR in the
+dir inode. But aufs doesn't choose this way because
+1. XATTR may not be supported by the branch (or its configuration).
+2. XATTR may have a size limit.
+3. XATTR may be less easy to convert than a regular file, when the
+   format of the info is changed in the future.
+At the same time, I agree that the regular file approach is much slower
+than the XATTR approach. So, in the future, aufs may take the XATTR or
+another better approach.
+
+This DIRREN feature is enabled by aufs configuration, and is activated
+by a new mount option.
+
+For the more complicated cases, there is an interaction with the UDBA
+option, whose job is to detect direct access to the branches (bypassing
+aufs) and to maintain the caches in aufs. Since a single cached aufs
+dentry may contain two names, before- and after-rename, the name
+comparison in the UDBA handler may not work correctly. In this case, the
+behaviour will be equivalent to the udba=reval case.
diff --git a/Documentation/filesystems/aufs/design/06fhsm.txt b/Documentation/filesystems/aufs/design/06fhsm.txt
new file mode 100644
index 000000000000..8b498d0af812
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06fhsm.txt
@@ -0,0 +1,120 @@
+
+# Copyright (C) 2011-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+File-based Hierarchical Storage Management (FHSM)
+----------------------------------------------------------------------
+Hierarchical Storage Management (or HSM) is a well-known feature in the
+storage world. Aufs provides this feature as a file-based one with
+multiple writable branches, based upon the principle of "the Colder, the
+Lower". Here the word "colder" refers to the less used files, and
+"lower" to the position in the vertical order of the stacked branches.
+These multiple writable branches are prioritized, ie. the topmost one
+should be the fastest drive and be used heavily.
+
+o Characters in aufs FHSM story
+- aufs itself and a new branch attribute.
+- a new ioctl interface to move-down and to establish a connection with
+ the daemon ("move-down" is a converse of "copy-up").
+- userspace tool and daemon.
+
+The userspace daemon establishes a connection with aufs and waits for
+the notification. The notified information is very similar to struct
+statfs containing the number of consumed blocks and inodes.
+When the consumed blocks/inodes of a branch exceed the user-specified
+upper watermark, the daemon activates its move-down process until the
+consumed blocks/inodes reach the user-specified lower watermark.
+
+The actual move-down is done by aufs based upon the request from
+user-space since we need to maintain the inode number and the internal
+pointer arrays in aufs.
+
+Currently aufs FHSM handles regular files only. Additionally, they
+must be neither hard-linked nor pseudo-linked.
+
+
+o Cooperation between aufs and the user-space daemon
+  While the userspace daemon keeps the connection established, aufs
+  sends a small notification to it whenever aufs writes something into a
+  writable branch. But this may be costly since aufs issues statfs(2)
+  internally. So the user can specify a new option to cache the
+  info. Actually the notification is controlled by these factors.
+  + the specified cache time.
+  + whether it is classified as "force" by aufs internally.
+  Until the specified time expires, aufs doesn't send the info except in
+  the forced cases. When aufs decides to force, the info is always
+  notified to userspace.
+  For example, the number of free inodes is generally large enough and a
+  shortage of them happens rarely. So aufs doesn't force the
+  notification when creating a new file, directory and the like. This is
+  the typical case where aufs doesn't force.
+  When aufs writes the actual file data and the file consumes new
+  blocks, aufs forces the notification.
+
+
+o Interfaces in aufs
+- New branch attribute.
+ + fhsm
+    Specifies that the branch is managed by the FHSM feature; in other
+    words, it is a participant in FHSM.
+ When nofhsm is set to the branch, it will not be the source/target
+ branch of the move-down operation. This attribute is set
+ independently from coo and moo attributes, and if you want full
+ FHSM, you should specify them as well.
+- New mount option.
+ + fhsm_sec
+    Specifies the number of seconds used to suppress the frequent, less
+    important notifications.
+- New ioctl.
+ + AUFS_CTL_FHSM_FD
+    creates a new file descriptor from which userspace can read the
+    notifications (a subset of struct statfs) from aufs; see the sketch
+    after this list.
+- Module parameter 'brs'
+ It has to be set to 1. Otherwise the new mount option 'fhsm' will not
+ be set.
+- mount helpers /sbin/mount.aufs and /sbin/umount.aufs
+ When there are two or more branches with fhsm attributes,
+ /sbin/mount.aufs invokes the user-space daemon and /sbin/umount.aufs
+ terminates it. As a result of remounting and branch-manipulation, the
+ number of branches with fhsm attribute can be one. In this case,
+ /sbin/mount.aufs will terminate the user-space daemon.
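+
+A rough userspace sketch of the daemon side follows. This is not the
+real aufs-util code; the exact ioctl argument and the format of the
+records read from the returned descriptor are assumptions here
+(AUFS_CTL_FHSM_FD itself comes from <linux/aufs_type.h>).
+
+	#include <linux/aufs_type.h>	/* AUFS_CTL_FHSM_FD */
+	#include <sys/ioctl.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char buf[4096];
+		ssize_t len;
+		int root, fd;
+
+		root = open("/mnt/aufs", O_RDONLY);	/* the aufs mountpoint */
+		if (root < 0)
+			return 1;
+		/* ask aufs for a descriptor delivering FHSM notifications */
+		fd = ioctl(root, AUFS_CTL_FHSM_FD, O_RDONLY);
+		if (fd < 0)
+			return 1;
+		while ((len = read(fd, buf, sizeof(buf))) > 0) {
+			/* parse the statfs-like records and decide whether
+			 * the move-down should be triggered */
+			printf("got %zd bytes of FHSM notification\n", len);
+		}
+		return 0;
+	}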
+
+
+Finally, the operation is performed in kernel-space with these steps.
+- make sure that,
+ + no one else is using the file.
+ + the file is not hard-linked.
+ + the file is not pseudo-linked.
+ + the file is a regular file.
+ + the parent dir is not opaqued.
+- find the target writable branch.
+- make sure the file is not whiteout-ed by the upper (than the target)
+ branch.
+- make the parent dir on the target branch.
+- mutex lock the inode on the branch.
+- unlink the whiteout on the target branch (if exists).
+- lookup and create the whiteout-ed temporary name on the target branch.
+- copy the file as the whiteout-ed temporary name on the target branch.
+- rename the whiteout-ed temporary name to the original name.
+- unlink the file on the source branch.
+- maintain the internal pointer array and the external inode number
+ table (XINO).
+- maintain the timestamps and other attributes of the parent dir and the
+ file.
+
+And of course, an error may happen at any step, in which case the
+operation should restore the original file state.
diff --git a/Documentation/filesystems/aufs/design/06mmap.txt b/Documentation/filesystems/aufs/design/06mmap.txt
new file mode 100644
index 000000000000..cdd84ea777fc
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06mmap.txt
@@ -0,0 +1,72 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+mmap(2) -- File Memory Mapping
+----------------------------------------------------------------------
+In aufs, the file-mapped pages are handled by a branch fs directly, with
+no interaction with aufs. This means aufs_mmap() calls the branch fs's
+->mmap().
+This approach is simple and good, but there is one problem.
+Under /proc, several entries show the mmapped files by their paths (with
+device and inode number), and the printed path will be the path on the
+branch fs instead of on the virtual aufs.
+This is not a problem in most cases, but some utilities such as lsof(1)
+(and their users) may expect the path on aufs.
+
+To address this issue, aufs adds a new member called vm_prfile in struct
+vm_area_struct (and struct vm_region). The original vm_file points to
+the file on the branch fs in order to handle everything correctly as
+usual. The new vm_prfile points to a virtual file in aufs, and the
+show-functions in procfs refer to vm_prfile if it is set.
+Also we need to maintain several other places which touch vm_file,
+such as
+- fork()/clone() copies the vma and the reference count of vm_file is
+  incremented.
+- merging vmas maintains the ref count too.
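+
+Conceptually, the procfs side then becomes something like the sketch
+below (the helper name here is illustrative, not necessarily the one
+used in the patch):
+
+	/* prefer the aufs-level file for printing, when one was recorded */
+	static struct file *vma_pr_or_file(struct vm_area_struct *vma)
+	{
+		return vma->vm_prfile ? vma->vm_prfile : vma->vm_file;
+	}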
+
+This is not an elegant approach; it just fakes the printed path. But it
+leaves all behaviour around f_mapping unchanged, which is surely an
+advantage.
+Actually aufs previously adopted another, more complicated approach
+which called generic_file_mmap() and handled struct
+vm_operations_struct. With that approach aufs met a hard problem, and I
+could not solve it without switching approaches.
+
+There may be yet another approach, which is
+- bind-mount the branch-root onto the aufs-root internally
+- grab the new vfsmount (ie. struct mount)
+- lazy-umount the branch-root internally
+- in open(2) the aufs-file, open the branch-file with the hidden
+ vfsmount (instead of the original branch's vfsmount)
+- ideally this "bind-mount and lazy-umount" should be done atomically,
+ but it may be possible from userspace by the mount helper.
+
+By adding the internal hidden vfsmount and using it when opening a file,
+the file path under /proc would be printed correctly. This approach
+looks smarter, but I am afraid it is not possible.
+- the aufs-root may be bind-mounted later. When that happens, another
+  hidden vfsmount will be required.
+- it is hard to get the chance to bind-mount and lazy-umount
+  + in kernel-space, an FS can get the vfsmount in open(2) via
+    file->f_path, so aufs can know its vfsmount. But several locks are
+    already acquired, and if aufs tries to bind-mount and lazy-umount
+    here, then it may cause a deadlock.
+  + in user-space, bind-mount doesn't invoke the mount helper.
+- since /proc shows dev and ino, aufs has to give the vma this info. It
+  means a new member vm_prinode would be necessary. This is essentially
+  equivalent to the vm_prfile described above.
+
+I have to give up this "looks-smarter" approach.
diff --git a/Documentation/filesystems/aufs/design/06xattr.txt b/Documentation/filesystems/aufs/design/06xattr.txt
new file mode 100644
index 000000000000..edd7553f3289
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06xattr.txt
@@ -0,0 +1,96 @@
+
+# Copyright (C) 2014-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Listing XATTR/EA and getting the value
+----------------------------------------------------------------------
+For the inode standard attributes (owner, group, timestamps, etc.), aufs
+shows the values from the topmost existing file. This behaviour is good
+for the non-dir entries since the behaviour exactly matches the shown
+information. But for directories, aufs considers all the same-named
+entries on the lower branches. This means that if one of the lower
+entries rejects a readdir call, then aufs returns an error even if the
+topmost entry allows it. This behaviour is necessary to respect the
+branch fs's security, but can confuse users since the user-visible
+standard attributes don't match the behaviour.
+To address this issue, aufs has a mount option called dirperm1 which
+checks the permission of the topmost entry only, and ignores the lower
+entries' permissions.
+
+A similar issue can happen around XATTR.
+The getxattr(2) and listxattr(2) families behave as if the dirperm1
+option were always set. Otherwise these very unpleasant situations would
+happen.
+- listxattr(2) may return duplicated entries.
+- users may never be able to remove or reset the XATTR.
+
+
+XATTR/EA support in the internal (copy,move)-(up,down)
+----------------------------------------------------------------------
+Generally the extended attributes of an inode are categorized as follows.
+- "security" for LSM and capability.
+- "system" for POSIX ACL; the 'acl' mount option is generally required
+  for the branch fs.
+- "trusted" for userspace; CAP_SYS_ADMIN is required.
+- "user" for userspace; the 'user_xattr' mount option is generally
+  required for the branch fs.
+
+Moreover there are some other categories. Aufs handles these rather
+unpopular categories like the ordinary ones, ie. there is no special
+condition or exception.
+
+In copy-up, the XATTR support on the dst branch may differ from that on
+the src branch. In this case, the copy-up operation will get an error
+and the original user operation which triggered the copy-up will
+fail. It can even happen that every copy-up fails.
+When both the src and dst branches support XATTR and an error occurs
+while copying XATTR, then the copy-up should obviously fail. That is a
+good reason for aufs to return an error to userspace. But when only the
+src branch supports that XATTR, aufs should not return an error.
+For example, the src branch supports ACL but the dst branch doesn't,
+because the dst branch may not support it natively or may temporarily
+not support it due to the "noacl" mount option. Of course, the dst
+branch fs may NOT return an error even if the XATTR is not supported. It
+is totally up to the branch fs.
+
+Anyway, when the aufs internal copy-up gets an error from the dst branch
+fs, aufs tries removing the just-copied entry and returns the error to
+userspace. The worst case of this situation is that every copy-up fails.
+
+For the copy-up operation, there are two basic approaches (a sketch of
+the chosen one follows below).
+- copy the specified XATTR only (by the category above), and return the
+  error unconditionally if it happens.
+- copy all XATTR, and ignore the error on the specified category only.
+
+In order to support XATTR and to implement the correct behaviour, aufs
+chooses the latter approach and introduces some new branch attributes,
+"icexsec", "icexsys", "icextr", "icexusr", and "icexoth".
+They correspond to the XATTR namespaces (see above). Additionally, for
+convenience, "icex" is also provided, which means that all the "icex*"
+attributes are set (here the word "icex" stands for "ignore copy-error
+on XATTR").
+
+The meaning of these attributes is to ignore the error from setting
+XATTR on that branch.
+Note that aufs tries copying all XATTR unconditionally, and ignores
+errors from the dst branch according to the specified attributes.
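+
+A minimal sketch of the latter approach follows. It is not the aufs
+copy-up code itself; au_xattr_ignored() is an assumed helper standing in
+for the icex* attribute check, and error handling is simplified.
+
+	/* sketch: copy every xattr, ignore errors per the icex* attributes */
+	static int au_copy_xattrs(struct dentry *src, struct dentry *dst,
+				  unsigned int icex_flags)
+	{
+		char *names, *name, *value;
+		ssize_t nlen, vlen;
+		int err;
+
+		err = -ENOMEM;
+		names = kmalloc(XATTR_LIST_MAX, GFP_NOFS);
+		value = kmalloc(XATTR_SIZE_MAX, GFP_NOFS);
+		if (!names || !value)
+			goto out;
+		nlen = vfs_listxattr(src, names, XATTR_LIST_MAX);
+		if (nlen < 0) {
+			err = nlen;
+			goto out;
+		}
+		err = 0;
+		for (name = names; nlen > 0;
+		     nlen -= strlen(name) + 1, name += strlen(name) + 1) {
+			vlen = vfs_getxattr(src, name, value, XATTR_SIZE_MAX);
+			if (vlen < 0)
+				continue;
+			err = vfs_setxattr(dst, name, value, vlen, 0);
+			if (err && au_xattr_ignored(name, icex_flags))
+				err = 0; /* icex*: ignore this category */
+			if (err)
+				break;	 /* a real failure, abort the copy-up */
+		}
+	out:
+		kfree(names);
+		kfree(value);
+		return err;
+	}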
+
+Some XATTR may have a default value. The default value may come from the
+parent dir or the environment. If the default value is set at
+file-creation time, it will be overwritten by copy-up.
+I am afraid some contradiction may happen here.
+Do we need another attribute to stop copying XATTR? I am unsure. For
+now, aufs implements the branch attributes to ignore the error.
diff --git a/Documentation/filesystems/aufs/design/07export.txt b/Documentation/filesystems/aufs/design/07export.txt
new file mode 100644
index 000000000000..9b983f3dd78a
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/07export.txt
@@ -0,0 +1,58 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Export Aufs via NFS
+----------------------------------------------------------------------
+Here is an approach.
+- like xino/xib, add a new file 'xigen' which stores aufs inode
+ generation.
+- iget_locked(): initialize aufs inode generation for a new inode, and
+ store it in xigen file.
+- destroy_inode(): increment the aufs inode generation and store it in
+  the xigen file. This is necessary even if the inode is not unlinked,
+  because any of its data may be changed by UDBA.
+- encode_fh(): for the root dir, simply return FILEID_ROOT. Otherwise
+  build the file handle from (see the struct sketch after this list)
+  + branch id (4 bytes)
+  + superblock generation (4 bytes)
+  + inode number (4 or 8 bytes)
+  + parent dir inode number (4 or 8 bytes)
+  + inode generation (4 bytes)
+  + return value of exportfs_encode_fh() for the parent on a branch (4
+    bytes)
+  + file handle for a branch (by exportfs_encode_fh())
+- fh_to_dentry():
+  + find the index of the branch from its id in the handle, and check
+    that it still exists in aufs.
+  + 1st level: get the inode number from the handle and search it in the
+    cache.
+  + 2nd level: if not found in the cache, get the parent inode number
+    from the handle and search it in the cache. Then open the found
+    parent dir, find the matching inode number by vfs_readdir(), get its
+    name, and call lookup_one_len() for the target dentry.
+  + 3rd level: if the parent dir is not cached, call
+    exportfs_decode_fh() for the branch and get the parent on the
+    branch, build its pathname, convert it to a pathname in aufs, and
+    call path_lookup(). Now aufs has a parent dir dentry and handles it
+    as in the 2nd level.
+  + to open the dir, aufs needs a struct vfsmount. aufs keeps a vfsmount
+    for every branch, but not for itself. To get this, (currently) aufs
+    searches the current->nsproxy->mnt_ns list. It may not be a good
+    idea, but I didn't find another approach.
+  + test the generation of the obtained inode.
+- every inode operation: they may get EBUSY due to UDBA. in this case,
+ convert it into ESTALE for NFSD.
+- readdir(): call lockdep_on/off() because filldir in NFSD calls
+ lookup_one_len(), vfs_getattr(), encode_fh() and others.
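+
+For illustration only, the handle built by encode_fh() above can be
+pictured as the struct below; the field names and types are chosen here
+for readability and are not the real aufs layout.
+
+	struct aufs_fh_example {	/* illustrative only */
+		__u32 brid;		/* branch id */
+		__u32 sbgen;		/* superblock generation */
+		__u64 ino;		/* inode number */
+		__u64 dir_ino;		/* parent dir inode number */
+		__u32 igen;		/* inode generation */
+		__u32 h_fh_type;	/* exportfs_encode_fh() result for the
+					 * parent on the branch */
+		__u32 h_fh[];		/* file handle for the branch */
+	};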
diff --git a/Documentation/filesystems/aufs/design/08shwh.txt b/Documentation/filesystems/aufs/design/08shwh.txt
new file mode 100644
index 000000000000..647a86a65db7
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/08shwh.txt
@@ -0,0 +1,52 @@
+
+# Copyright (C) 2005-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Show Whiteout Mode (shwh)
+----------------------------------------------------------------------
+Generally aufs hides the names of whiteouts. But in some cases, showing
+them is very useful for users. For instance, when creating a new middle
+layer (branch) by merging existing layers.
+
+(borrowing aufs1 HOW-TO from a user, Michael Towers)
+When you have three branches,
+- Bottom: 'system', squashfs (underlying base system), read-only
+- Middle: 'mods', squashfs, read-only
+- Top: 'overlay', ram (tmpfs), read-write
+
+The top layer is loaded at boot time and saved at shutdown, to preserve
+the changes made to the system during the session.
+When larger changes have been made, or smaller changes have accumulated,
+the size of the saved top layer data grows. At this point, it would be
+nice to be able to merge the two overlay branches ('mods' and 'overlay')
+and rewrite the 'mods' squashfs, clearing the top layer and thus
+restoring save and load speed.
+
+This merging is simplified by the use of another aufs mount, of just the
+two overlay branches using the 'shwh' option.
+# mount -t aufs -o ro,shwh,br:/livesys/overlay=ro+wh:/livesys/mods=rr+wh \
+ aufs /livesys/merge_union
+
+A merged view of these two branches is then available at
+/livesys/merge_union, and the new feature is that the whiteouts are
+visible!
+Note that in 'shwh' mode the aufs mount must be 'ro', which will disable
+writing to all branches. Also the default mode for all branches is 'ro'.
+It is now possible to save the combined contents of the two overlay
+branches to a new squashfs, e.g.:
+# mksquashfs /livesys/merge_union /path/to/newmods.squash
+
+This new squashfs archive can be stored on the boot device and the
+initramfs will use it to replace the old one at the next boot.
diff --git a/Documentation/filesystems/aufs/design/10dynop.txt b/Documentation/filesystems/aufs/design/10dynop.txt
new file mode 100644
index 000000000000..13e8583f2b91
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/10dynop.txt
@@ -0,0 +1,47 @@
+
+# Copyright (C) 2010-2019 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Dynamically customizable FS operations
+----------------------------------------------------------------------
+Generally FS operations (struct inode_operations, struct
+address_space_operations, struct file_operations, etc.) are defined as
+"static const", but that never means an FS has only one set of
+operations. Some FSs have multiple sets of them. For instance, ext2 has
+three sets: one for XIP, one for NOBH, and one for normal use.
+Since aufs overrides and redirects these operations, sometimes aufs has
+to change its behaviour according to the branch FS type. More
+importantly, VFS acts differently depending on whether a function (a
+member of the struct) is set or not. It means aufs should have several
+sets of operations and select one among them according to the branch FS
+definition.
+
+In order to solve this problem without affecting the behaviour of VFS,
+aufs defines these operations dynamically. For instance, aufs defines a
+dummy direct_IO function for struct address_space_operations, but it may
+not actually be set in the address_space_operations. When the branch FS
+doesn't have it, aufs doesn't set it in its address_space_operations,
+while the function definition itself is still alive. So the behaviour
+itself will not change, and an error is returned when direct_IO is not
+set.
+
+The lifetime of these dynamically generated operation objects is
+maintained by the aufs branch object. When the branch is removed from
+aufs, the reference counter of the object is decremented. When it
+reaches zero, the dynamically generated operation object is freed.
+
+This approach is designed mainly to support AIO (io_submit), Direct I/O
+and XIP (DAX).
+Currently this approach is applied to the address_space_operations for
+regular files only.
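+
+The idea can be sketched roughly as below; the names are made up for
+this illustration and do not match the aufs source.
+
+	/* assumed: aufs' full template of address_space_operations */
+	static const struct address_space_operations aufs_aop_template;
+
+	/* a dynamically built set of aops, shared per branch and refcounted */
+	struct au_dyaop_example {
+		struct kref kref;
+		const struct address_space_operations *h_aop; /* branch's aops */
+		struct address_space_operations aop;	/* the copy aufs installs */
+	};
+
+	/* copy the full template, then drop the members the branch fs lacks */
+	static void au_dy_build(struct au_dyaop_example *dy,
+				const struct address_space_operations *h_aop)
+	{
+		kref_init(&dy->kref);
+		dy->h_aop = h_aop;
+		dy->aop = aufs_aop_template;
+		if (!h_aop->direct_IO)
+			dy->aop.direct_IO = NULL; /* keep the VFS behaviour */
+	}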
diff --git a/MAINTAINERS b/MAINTAINERS
index dce5c099f43c..cd272a98db1c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2635,6 +2635,19 @@ F: include/linux/audit.h
F: include/uapi/linux/audit.h
F: kernel/audit*
+AUFS (advanced multi layered unification filesystem) FILESYSTEM
+M: "J. R. Okajima" <hooanon05g@gmail.com>
+L: linux-unionfs@vger.kernel.org
+L: aufs-users@lists.sourceforge.net (members only)
+W: http://aufs.sourceforge.net
+T: git://github.com/sfjro/aufs4-linux.git
+S: Supported
+F: Documentation/filesystems/aufs/
+F: Documentation/ABI/testing/debugfs-aufs
+F: Documentation/ABI/testing/sysfs-aufs
+F: fs/aufs/
+F: include/uapi/linux/aufs_type.h
+
AUXILIARY DISPLAY DRIVERS
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
S: Maintained
diff --git a/Makefile b/Makefile
index bf21b5a86e4b..7f92a6c2416e 100644
--- a/Makefile
+++ b/Makefile
@@ -482,7 +482,7 @@ export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_ve
# Files to ignore in find ... statements
export RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o \
- -name CVS -o -name .pc -o -name .hg -o -name .git \) \
+ -name CVS -o -name .pc -o -name .hg -o -name .git -o -name meta \) \
-prune -o
export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
--exclude CVS --exclude .pc --exclude .hg --exclude .git
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 9db3c584b2cb..9253b7533cb1 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -64,7 +64,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
# macro, but instead defines a whole series of macros which makes
# testing for a specific architecture or later rather impossible.
arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m
-arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
+arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 -march=armv7-a -Wa$(comma)-march=armv7-a
arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
# Only override the compiler option if ARMv6. The ARMv6K extensions are
# always available in ARMv7
@@ -336,6 +336,9 @@ PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
bootpImage uImage: zImage
zImage: Image
+# These targets cannot be built in parallel
+.NOTPARALLEL: $(BOOT_TARGETS)
+
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
@$(kecho) ' Kernel: $(boot)/$@ is ready'
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 0b3cd7a33a26..67608510fad3 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -68,6 +68,10 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
endif
+ifeq ($(CONFIG_ARCH_VEXPRESS), y)
+LOADADDR=0x04080000
+endif
+
ifneq ($(LOADADDR),)
UIMAGE_LOADADDR=$(LOADADDR)
else
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 0465d65d23de..08f4fcb9b3b5 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -352,7 +352,8 @@ ENTRY(\sym)
*/
syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
-#ifdef CONFIG_AEABI
+
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 58f69fa07df9..2daa276f0e5b 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -455,8 +455,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (pud_none(*pud_k))
goto bad_area;
- if (!pud_present(*pud))
+ if (!pud_present(*pud)) {
set_pud(pud, *pud_k);
+ /*
+ * There is a small window during free_pgtables() where the
+ * user *pud entry is 0 but the TLB has not been invalidated
+ * and we get a level 2 (pmd) translation fault caused by the
+ * intermediate TLB caching of the old level 1 (pud) entry.
+ */
+ flush_tlb_kernel_page(addr);
+ }
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
@@ -479,8 +487,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
#endif
if (pmd_none(pmd_k[index]))
goto bad_area;
+ if (!pmd_present(pmd[index]))
+ copy_pmd(pmd, pmd_k);
- copy_pmd(pmd, pmd_k);
return 0;
bad_area:
diff --git a/arch/arm/tools/gen-mach-types b/arch/arm/tools/gen-mach-types
index cbe1c33bb871..cb2b69f2a39d 100644
--- a/arch/arm/tools/gen-mach-types
+++ b/arch/arm/tools/gen-mach-types
@@ -1,4 +1,4 @@
-#!/bin/awk
+#!/usr/bin/awk
# SPDX-License-Identifier: GPL-2.0
#
# Awk script to generate include/generated/mach-types.h
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 65b6e85447b1..211bb0d06627 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -484,6 +484,7 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
case CPU_R4700:
case CPU_R5000:
case CPU_NEVADA:
+ case CPU_4KC:
uasm_i_nop(p);
uasm_i_tlbp(p);
break;
@@ -532,6 +533,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R4600:
case CPU_R4700:
+ case CPU_4KC:
uasm_i_nop(p);
tlbw(p);
uasm_i_nop(p);
@@ -557,7 +559,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
- case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
case CPU_M14KEC:
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 488c9edffa58..602649da99a9 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -292,6 +292,9 @@ PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
boot := arch/$(ARCH)/boot
+# These targets cannot be built in parallel
+.NOTPARALLEL: $(BOOT_TARGETS)
+
$(BOOT_TARGETS1): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
$(BOOT_TARGETS2): vmlinux
@@ -411,7 +414,7 @@ endif
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-archprepare: checkbin
+archprepare: checkbin arch/powerpc/lib/crtsavres.o
archheaders:
$(Q)$(MAKE) $(build)=arch/powerpc/kernel/syscalls all
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index cb7f0bb9ee71..51772281b849 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -3,7 +3,9 @@
# Makefile for the linux kernel.
#
-CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' $(call cc-disable-warning, attribute-alias)
+CFLAGS_ptrace.o += $(call cc-disable-warning, array-bounds)
+CFLAGS_syscalls.o += $(call cc-disable-warning, attribute-alias)
# Disable clang warning for using setjmp without setjmp.h header
CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header)
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index cf0c9c9c24f9..78c88af3890b 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -251,6 +251,40 @@ V_FUNCTION_END(__kernel_sigtramp_rt32)
vsave_msr1 (31); \
vsave_msr2 (33, 32*16+12); \
vsave (32, 32*16)
+#elif defined(CONFIG_SPE)
+#define EH_FRAME_VMX \
+ rsave (113, VREGS); \
+ rsave (114, VREGS + 1*4); \
+ rsave (115, VREGS + 2*4); \
+ rsave (116, VREGS + 3*4); \
+ rsave (117, VREGS + 4*4); \
+ rsave (118, VREGS + 5*4); \
+ rsave (119, VREGS + 6*4); \
+ rsave (120, VREGS + 7*4); \
+ rsave (121, VREGS + 8*4); \
+ rsave (122, VREGS + 9*4); \
+ rsave (123, VREGS + 10*4); \
+ rsave (124, VREGS + 11*4); \
+ rsave (125, VREGS + 12*4); \
+ rsave (126, VREGS + 13*4); \
+ rsave (127, VREGS + 14*4); \
+ rsave (128, VREGS + 15*4); \
+ rsave (129, VREGS + 16*4); \
+ rsave (130, VREGS + 17*4); \
+ rsave (131, VREGS + 18*4); \
+ rsave (132, VREGS + 19*4); \
+ rsave (133, VREGS + 20*4); \
+ rsave (134, VREGS + 21*4); \
+ rsave (135, VREGS + 22*4); \
+ rsave (136, VREGS + 23*4); \
+ rsave (137, VREGS + 24*4); \
+ rsave (138, VREGS + 25*4); \
+ rsave (139, VREGS + 26*4); \
+ rsave (140, VREGS + 27*4); \
+ rsave (141, VREGS + 28*4); \
+ rsave (142, VREGS + 29*4); \
+ rsave (143, VREGS + 30*4); \
+ rsave (144, VREGS + 31*4);
#else
#define EH_FRAME_VMX
#endif
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5986df48359b..d874033ab5f2 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -552,6 +552,7 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
+ mb();
create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
/* For the boot cpu, we're running on the stack in init_thread_union,
diff --git a/arch/sh/tools/gen-mach-types b/arch/sh/tools/gen-mach-types
index 6d7c2d8eaf55..2a4a42a6d70c 100644
--- a/arch/sh/tools/gen-mach-types
+++ b/arch/sh/tools/gen-mach-types
@@ -1,4 +1,4 @@
-#!/bin/awk
+#!/usr/bin/awk
# SPDX-License-Identifier: GPL-2.0
#
# Awk script to generate include/generated/machtypes.h
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 19f650d729f5..8e5498be76bd 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -295,6 +295,34 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
syscall_return_slowpath(regs);
}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void trace_hardirqs_on_caller(unsigned long caller_addr);
+__visible void trace_hardirqs_on_caller_cr2(unsigned long caller_addr)
+{
+ unsigned long address = read_cr2(); /* Get the faulting address */
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ trace_hardirqs_on_caller(caller_addr);
+ if (address != read_cr2())
+ write_cr2(address);
+ exception_exit(prev_state);
+}
+
+extern void trace_hardirqs_off_caller(unsigned long caller_addr);
+__visible void trace_hardirqs_off_caller_cr2(unsigned long caller_addr)
+{
+ unsigned long address = read_cr2(); /* Get the faulting address */
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ trace_hardirqs_off_caller(caller_addr);
+ if (address != read_cr2())
+ write_cr2(address);
+ exception_exit(prev_state);
+}
+#endif
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4fe27b67d7e2..89494965e9f9 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1250,12 +1250,12 @@ ENTRY(error_entry)
* we fix gsbase, and we should do it before enter_from_user_mode
* (which can take locks).
*/
- TRACE_IRQS_OFF
+ TRACE_IRQS_OFF_CR2
CALL_enter_from_user_mode
ret
.Lerror_entry_done:
- TRACE_IRQS_OFF
+ TRACE_IRQS_OFF_CR2
ret
/*
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index be36bf4e0957..1300b53b98cb 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -41,6 +41,8 @@
#ifdef CONFIG_TRACE_IRQFLAGS
THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
+ THUNK trace_hardirqs_on_thunk_cr2,trace_hardirqs_on_caller_cr2,1
+ THUNK trace_hardirqs_off_thunk_cr2,trace_hardirqs_off_caller_cr2,1
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 8a0e56e1dcc9..fc07b491f4ff 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -176,9 +176,13 @@ static inline int arch_irqs_disabled(void)
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
+# define TRACE_IRQS_ON_CR2 call trace_hardirqs_on_thunk_cr2;
+# define TRACE_IRQS_OFF_CR2 call trace_hardirqs_off_thunk_cr2;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
+# define TRACE_IRQS_ON_CR2
+# define TRACE_IRQS_OFF_CR2
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 0eda91f8eeac..34ab4161fc39 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/jiffies.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index b02a36b2c14f..9166a071484c 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -1,4 +1,4 @@
-#!/bin/awk -f
+#!/usr/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
# gen-insn-attr-x86.awk: Instruction attribute table generator
# Written by Masami Hiramatsu <mhiramat@redhat.com>
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fc7aefd42ae0..18a963bfffbe 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -738,6 +738,24 @@ out_err:
return error;
}
+/*
+ * for AUFS
+ * no get/put for file.
+ */
+struct file *loop_backing_file(struct super_block *sb)
+{
+ struct file *ret;
+ struct loop_device *l;
+
+ ret = NULL;
+ if (MAJOR(sb->s_dev) == LOOP_MAJOR) {
+ l = sb->s_bdev->bd_disk->private_data;
+ ret = l->lo_backing_file;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(loop_backing_file);
+
/* loop sysfs attributes */
static ssize_t loop_attr_show(struct device *dev, char *page,
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 317c9720467c..f6cf4286b319 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -261,6 +261,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
pko_command.s.segs = 1;
pko_command.s.total_bytes = skb->len;
+#if GCC_VERSION >= 80000
+ barrier();
+#endif
pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
pko_command.s.subone0 = 1;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3ce71cbfbb58..477c863b82c3 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -627,6 +627,9 @@ EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
*/
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
+#ifdef CONFIG_MIPS_MALTA
+ int timeout = 10;
+#endif
/* Turn off PIRQ enable and SMI enable. (This also turns off the
* BIOS's USB Legacy Support.) Turn off all the R/WC bits too.
*/
@@ -640,9 +643,16 @@ void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
mb();
udelay(5);
- if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
- dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
+#ifdef CONFIG_MIPS_MALTA
+ while (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET) {
+ if (--timeout < 0) {
+ dev_warn(&pdev->dev, "HCRESET timed out!\n");
+ break;
+ }
+ udelay(5);
+ }
+#endif
/* Just to be safe, disable interrupt requests and
* make sure the controller is stopped.
*/
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 35fcb826152c..1cd2808eb455 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -72,7 +72,9 @@ static void uhci_fsbr_off(struct uhci_hcd *uhci)
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = urb->hcpriv;
-
+#ifdef CONFIG_MIPS_MALTA
+ return;
+#endif
urbp->fsbr = 1;
}
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 34dc8e53a1e9..70e99e8509d6 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -43,6 +43,7 @@ static const struct fb_fix_screeninfo uvesafb_fix = {
.visual = FB_VISUAL_TRUECOLOR,
};
+static int task_timeout = UVESAFB_TIMEOUT; /* timeout [ms] of task execution */
static int mtrr = 3; /* enable mtrr by default */
static bool blank = 1; /* enable blanking by default */
static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
@@ -146,7 +147,9 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
struct cn_msg *m;
int err;
int len = sizeof(task->t) + task->t.buf_len;
-
+ int forever_wait = task_timeout < 0 ? 1 : 0;
+ /* For infinite wait case, set interval to the legacy 5000 ms */
+ int timeout = forever_wait ? 5000 : task_timeout;
/*
* Check whether the message isn't longer than the maximum
* allowed by connector.
@@ -212,9 +215,21 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
} else if (err == -ENOBUFS)
err = 0;
- if (!err && !(task->t.flags & TF_EXIT))
- err = !wait_for_completion_timeout(task->done,
- msecs_to_jiffies(UVESAFB_TIMEOUT));
+ if (!err && !(task->t.flags & TF_EXIT)) {
+ do {
+ err = !wait_for_completion_timeout(task->done,
+ msecs_to_jiffies(timeout));
+ if (err)
+ printk_ratelimited(
+ KERN_WARNING
+ "uvesafb: %u ms task timeout%s\n",
+ timeout,
+ forever_wait ?
+ ", infinitely waiting." : ".");
+ else
+ break;
+ } while (forever_wait);
+ }
mutex_lock(&uvfb_lock);
uvfb_tasks[seq] = NULL;
@@ -1816,6 +1831,8 @@ static int uvesafb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
+ if (!strncmp(this_opt, "task_timeout", 12))
+ task_timeout = simple_strtol(this_opt+12, NULL, 0);
if (!strcmp(this_opt, "redraw"))
ypan = 0;
else if (!strcmp(this_opt, "ypan"))
@@ -1959,6 +1976,9 @@ static const struct kernel_param_ops param_ops_scroll = {
};
#define param_check_scroll(name, p) __param_check(name, p, void)
+module_param(task_timeout, int, 0400);
+MODULE_PARM_DESC(task_timeout,
+ "Timeout [ms] of a task's completion, or set to any negative value to infinitely wait");
module_param_named(scroll, ypan, scroll, 0);
MODULE_PARM_DESC(scroll,
"Scrolling mode, set to 'redraw', 'ypan', or 'ywrap'");
diff --git a/fs/Kconfig b/fs/Kconfig
index ac474a61be37..4171d7e64191 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -239,6 +239,7 @@ source "fs/hfsplus/Kconfig"
source "fs/befs/Kconfig"
source "fs/bfs/Kconfig"
source "fs/efs/Kconfig"
+source "fs/yaffs2/Kconfig"
source "fs/jffs2/Kconfig"
# UBIFS File system configuration
source "fs/ubifs/Kconfig"
@@ -255,6 +256,7 @@ source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
+source "fs/aufs/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index 293733f61594..5d9198a18e0d 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -128,3 +128,5 @@ obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
+obj-$(CONFIG_AUFS_FS) += aufs/
diff --git a/fs/Makefile.rej b/fs/Makefile.rej
new file mode 100644
index 000000000000..10121e2eec8f
--- /dev/null
+++ b/fs/Makefile.rej
@@ -0,0 +1,6 @@
+diff a/fs/Makefile b/fs/Makefile (rejected hunks)
+@@ -128,3 +128,4 @@ obj-y += exofs/ # Multiple modules
+ obj-$(CONFIG_CEPH_FS) += ceph/
+ obj-$(CONFIG_PSTORE) += pstore/
+ obj-$(CONFIG_EFIVAR_FS) += efivarfs/
++obj-$(CONFIG_AUFS_FS) += aufs/
diff --git a/fs/aufs/Kconfig b/fs/aufs/Kconfig
new file mode 100644
index 000000000000..9f436425716a
--- /dev/null
+++ b/fs/aufs/Kconfig
@@ -0,0 +1,199 @@
+# SPDX-License-Identifier: GPL-2.0
+config AUFS_FS
+ tristate "Aufs (Advanced multi layered unification filesystem) support"
+ help
+ Aufs is a stackable unification filesystem such as Unionfs,
+ which unifies several directories and provides a merged single
+ directory.
+ In its early days, aufs was an entire re-design and
+ re-implementation of the Unionfs Version 1.x series. Introducing many
+ original ideas, approaches and improvements, it has become quite
+ different from Unionfs while keeping the basic features.
+
+if AUFS_FS
+choice
+ prompt "Maximum number of branches"
+ default AUFS_BRANCH_MAX_127
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_127
+ bool "127"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_511
+ bool "511"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_1023
+ bool "1023"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_32767
+ bool "32767"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+endchoice
+
+config AUFS_SBILIST
+ bool
+ depends on AUFS_MAGIC_SYSRQ || PROC_FS
+ default y
+ help
+ Automatic configuration for internal use.
+ When aufs supports Magic SysRq or /proc, this is enabled automatically.
+
+config AUFS_HNOTIFY
+ bool "Detect direct branch access (bypassing aufs)"
+ help
+ If you want to modify files on branches directly, e.g. bypassing aufs,
+ and want aufs to fully detect those changes, then enable this
+ option and use the 'udba=notify' mount option.
+ Currently there is only one available configuration, "fsnotify".
+ It will have a negative impact on performance.
+ See details in aufs.5.
+
+choice
+ prompt "method" if AUFS_HNOTIFY
+ default AUFS_HFSNOTIFY
+config AUFS_HFSNOTIFY
+ bool "fsnotify"
+ select FSNOTIFY
+endchoice
+
+config AUFS_EXPORT
+ bool "NFS-exportable aufs"
+ depends on EXPORTFS
+ help
+ If you want to export your mounted aufs via NFS, then enable this
+ option. There are several requirements for this configuration.
+ See details in aufs.5.
+
+config AUFS_INO_T_64
+ bool
+ depends on AUFS_EXPORT
+ depends on 64BIT && !(ALPHA || S390)
+ default y
+ help
+ Automatic configuration for internal use.
+ /* typedef unsigned long/int __kernel_ino_t */
+ /* alpha and s390x are int */
+
+config AUFS_XATTR
+ bool "support for XATTR/EA (including Security Labels)"
+ help
+ If your branch fs supports XATTR/EA and you want to make them
+ available in aufs too, then enable this option and specify the
+ branch attributes for EA.
+ See details in aufs.5.
+
+config AUFS_FHSM
+ bool "File-based Hierarchical Storage Management"
+ help
+ Hierarchical Storage Management (or HSM) is a well-known feature
+ in the storage world. Aufs provides this feature as file-based,
+ with multiple branches.
+ These multiple branches are prioritized, i.e. the topmost one
+ should be the fastest drive and be used heavily.
+
+config AUFS_RDU
+ bool "Readdir in userspace"
+ help
+ Aufs has two methods to provide a merged view for a directory,
+ by a user-space library and by kernel-space natively. The latter
+ is always enabled but sometimes large and slow.
+ If you enable this option, install the library from the aufs2-util
+ package, and set some environment variables for your readdir(3);
+ the work will then be handled in user-space, which generally
+ shows better performance.
+ See details in aufs.5.
+
+config AUFS_DIRREN
+ bool "Workaround for rename(2)-ing a directory"
+ help
+ By default, aufs returns an EXDEV error when renaming a dir which
+ has a child on the lower branch, since it is a bad idea to issue
+ rename(2) internally for every lower branch. But users may not
+ accept this behaviour. So here is a workaround to allow such a
+ rename(2) and store some extra information on the writable
+ branch. Obviously this is costly (and I don't like it).
+ To use this feature, you need to enable this configuration AND
+ to specify the mount option `dirren.'
+ See details in aufs.5 and the design documents.
+
+config AUFS_SHWH
+ bool "Show whiteouts"
+ help
+ If you want to make the whiteouts in aufs visible, then enable
+ this option and specify the 'shwh' mount option. Although it may
+ sound philosophical, technically it
+ simply shows the names of the whiteouts while keeping their behaviour.
+
+config AUFS_BR_RAMFS
+ bool "Ramfs (initramfs/rootfs) as an aufs branch"
+ help
+ If you want to use ramfs as an aufs branch fs, then enable this
+ option. Generally tmpfs is recommended.
+ Aufs prohibits ramfs from being a branch fs by default, because
+ initramfs generally becomes unusable after switch_root or
+ similar. If you set initramfs as an aufs branch and boot your
+ system via switch_root, you will easily hit a problem since the
+ files in initramfs may become inaccessible.
+ Unless you are going to use ramfs as an aufs branch fs without
+ switch_root or similar, leave it N.
+
+config AUFS_BR_FUSE
+ bool "Fuse fs as an aufs branch"
+ depends on FUSE_FS
+ select AUFS_POLL
+ help
+ If you want to use a fuse-based userspace filesystem as an aufs
+ branch fs, then enable this option.
+ It implements the internal poll(2) operation, which is
+ implemented by fuse only (currently).
+
+config AUFS_POLL
+ bool
+ help
+ Automatic configuration for internal use.
+
+config AUFS_BR_HFSPLUS
+ bool "Hfsplus as an aufs branch"
+ depends on HFSPLUS_FS
+ default y
+ help
+ If you want to use hfsplus fs as an aufs branch fs, then enable
+ this option. This option introduces a small overhead when
+ copying up a file on hfsplus.
+
+config AUFS_BDEV_LOOP
+ bool
+ depends on BLK_DEV_LOOP
+ default y
+ help
+ Automatic configuration for internal use.
+ Convert =[ym] into =y.
+
+config AUFS_DEBUG
+ bool "Debug aufs"
+ help
+ Enable this to compile aufs internal debug code.
+ It will have a negative impact on performance.
+
+config AUFS_MAGIC_SYSRQ
+ bool
+ depends on AUFS_DEBUG && MAGIC_SYSRQ
+ default y
+ help
+ Automatic configuration for internal use.
+ When aufs supports Magic SysRq, this is enabled automatically.
+endif
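Note that most of the AUFS_* options above only compile the corresponding code in; the behaviour itself is still chosen per mount. For instance, with AUFS_HNOTIFY enabled, direct-branch-change detection is requested at mount time with something like "mount -t aufs -o br=/upper=rw:/lower=ro,udba=notify none /mnt" (the paths here are placeholders; see aufs.5 for the authoritative option syntax), and AUFS_SHWH and AUFS_DIRREN likewise only take effect together with the 'shwh' and 'dirren' mount options mentioned in their help texts.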
diff --git a/fs/aufs/Makefile b/fs/aufs/Makefile
new file mode 100644
index 000000000000..2c819a64935e
--- /dev/null
+++ b/fs/aufs/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0
+
+include ${src}/magic.mk
+ifeq (${CONFIG_AUFS_FS},m)
+include ${src}/conf.mk
+endif
+-include ${src}/priv_def.mk
+
+# cf. include/linux/kernel.h
+# enable pr_debug
+ccflags-y += -DDEBUG
+# sparse requires the full pathname
+ifdef M
+ccflags-y += -include ${M}/../../include/uapi/linux/aufs_type.h
+else
+ccflags-y += -include ${srctree}/include/uapi/linux/aufs_type.h
+endif
+
+obj-$(CONFIG_AUFS_FS) += aufs.o
+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \
+ wkq.o vfsub.o dcsub.o \
+ cpup.o whout.o wbr_policy.o \
+ dinfo.o dentry.o \
+ dynop.o \
+ finfo.o file.o f_op.o \
+ dir.o vdir.o \
+ iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \
+ mvdown.o ioctl.o
+
+# all are boolean
+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o
+aufs-$(CONFIG_SYSFS) += sysfs.o
+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o
+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o
+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o
+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o
+aufs-$(CONFIG_AUFS_EXPORT) += export.o
+aufs-$(CONFIG_AUFS_XATTR) += xattr.o
+aufs-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
+aufs-$(CONFIG_AUFS_DIRREN) += dirren.o
+aufs-$(CONFIG_AUFS_FHSM) += fhsm.o
+aufs-$(CONFIG_AUFS_POLL) += poll.o
+aufs-$(CONFIG_AUFS_RDU) += rdu.o
+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o
+aufs-$(CONFIG_AUFS_DEBUG) += debug.o
+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o
diff --git a/fs/aufs/aufs.h b/fs/aufs/aufs.h
new file mode 100644
index 000000000000..a62a85211b88
--- /dev/null
+++ b/fs/aufs/aufs.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * all header files
+ */
+
+#ifndef __AUFS_H__
+#define __AUFS_H__
+
+#ifdef __KERNEL__
+
+#define AuStub(type, name, body, ...) \
+ static inline type name(__VA_ARGS__) { body; }
+
+#define AuStubVoid(name, ...) \
+ AuStub(void, name, , __VA_ARGS__)
+#define AuStubInt0(name, ...) \
+ AuStub(int, name, return 0, __VA_ARGS__)
+
+#include "debug.h"
+
+#include "branch.h"
+#include "cpup.h"
+#include "dcsub.h"
+#include "dbgaufs.h"
+#include "dentry.h"
+#include "dir.h"
+#include "dirren.h"
+#include "dynop.h"
+#include "file.h"
+#include "fstype.h"
+#include "hbl.h"
+#include "inode.h"
+#include "lcnt.h"
+#include "loop.h"
+#include "module.h"
+#include "opts.h"
+#include "rwsem.h"
+#include "super.h"
+#include "sysaufs.h"
+#include "vfsub.h"
+#include "whout.h"
+#include "wkq.h"
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_H__ */
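The AuStub*() helpers defined in aufs.h above generate empty inline stubs so that optional features can be compiled out without an #ifdef at every call site (branch.h later in this patch uses AuStubVoid() for the CONFIG_AUFS_FHSM=n case). For illustration only, the expansions look roughly like this; au_example_init is a made-up name:

/* AuStubVoid(au_br_fhsm_init, struct au_br_fhsm *brfhsm) expands to: */
static inline void au_br_fhsm_init(struct au_br_fhsm *brfhsm) { ; }

/* AuStubInt0(au_example_init, struct super_block *sb) would expand to: */
static inline int au_example_init(struct super_block *sb) { return 0; }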
diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c
new file mode 100644
index 000000000000..1f783609e634
--- /dev/null
+++ b/fs/aufs/branch.c
@@ -0,0 +1,1422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * branch management
+ */
+
+#include <linux/compat.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/*
+ * free a single branch
+ */
+static void au_br_do_free(struct au_branch *br)
+{
+ int i;
+ struct au_wbr *wbr;
+ struct au_dykey **key;
+
+ au_hnotify_fin_br(br);
+ /* always, regardless of the mount option */
+ au_dr_hino_free(&br->br_dirren);
+ au_xino_put(br);
+
+ AuLCntZero(au_lcnt_read(&br->br_nfiles, /*do_rev*/0));
+ au_lcnt_fin(&br->br_nfiles, /*do_sync*/0);
+ AuLCntZero(au_lcnt_read(&br->br_count, /*do_rev*/0));
+ au_lcnt_fin(&br->br_count, /*do_sync*/0);
+
+ wbr = br->br_wbr;
+ if (wbr) {
+ for (i = 0; i < AuBrWh_Last; i++)
+ dput(wbr->wbr_wh[i]);
+ AuDebugOn(atomic_read(&wbr->wbr_wh_running));
+ AuRwDestroy(&wbr->wbr_wh_rwsem);
+ }
+
+ if (br->br_fhsm) {
+ au_br_fhsm_fin(br->br_fhsm);
+ au_kfree_try_rcu(br->br_fhsm);
+ }
+
+ key = br->br_dykey;
+ for (i = 0; i < AuBrDynOp; i++, key++)
+ if (*key)
+ au_dy_put(*key);
+ else
+ break;
+
+ /* recursive lock, s_umount of branch's */
+ /* synchronize_rcu(); */ /* why? */
+ lockdep_off();
+ path_put(&br->br_path);
+ lockdep_on();
+ au_kfree_rcu(wbr);
+ au_lcnt_wait_for_fin(&br->br_nfiles);
+ au_lcnt_wait_for_fin(&br->br_count);
+ /* I don't know why, but percpu_refcount requires this */
+ /* synchronize_rcu(); */
+ au_kfree_rcu(br);
+}
+
+/*
+ * frees all branches
+ */
+void au_br_free(struct au_sbinfo *sbinfo)
+{
+ aufs_bindex_t bmax;
+ struct au_branch **br;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ bmax = sbinfo->si_bbot + 1;
+ br = sbinfo->si_branch;
+ while (bmax--)
+ au_br_do_free(*br++);
+}
+
+/*
+ * find the index of a branch which is specified by @br_id.
+ */
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id)
+{
+ aufs_bindex_t bindex, bbot;
+
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++)
+ if (au_sbr_id(sb, bindex) == br_id)
+ return bindex;
+ return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * add a branch
+ */
+
+static int test_overlap(struct super_block *sb, struct dentry *h_adding,
+ struct dentry *h_root)
+{
+ if (unlikely(h_adding == h_root
+ || au_test_loopback_overlap(sb, h_adding)))
+ return 1;
+ if (h_adding->d_sb != h_root->d_sb)
+ return 0;
+ return au_test_subdir(h_adding, h_root)
+ || au_test_subdir(h_root, h_adding);
+}
+
+/*
+ * returns a newly allocated branch. @new_nbranch is the number of branches
+ * after adding the branch.
+ */
+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
+ int perm)
+{
+ struct au_branch *add_branch;
+ struct dentry *root;
+ struct inode *inode;
+ int err;
+
+ err = -ENOMEM;
+ add_branch = kzalloc(sizeof(*add_branch), GFP_NOFS);
+ if (unlikely(!add_branch))
+ goto out;
+ add_branch->br_xino = au_xino_alloc(/*nfile*/1);
+ if (unlikely(!add_branch->br_xino))
+ goto out_br;
+ err = au_hnotify_init_br(add_branch, perm);
+ if (unlikely(err))
+ goto out_xino;
+
+ if (au_br_writable(perm)) {
+ /* may be freed separately at changing the branch permission */
+ add_branch->br_wbr = kzalloc(sizeof(*add_branch->br_wbr),
+ GFP_NOFS);
+ if (unlikely(!add_branch->br_wbr))
+ goto out_hnotify;
+ }
+
+ if (au_br_fhsm(perm)) {
+ err = au_fhsm_br_alloc(add_branch);
+ if (unlikely(err))
+ goto out_wbr;
+ }
+
+ root = sb->s_root;
+ err = au_sbr_realloc(au_sbi(sb), new_nbranch, /*may_shrink*/0);
+ if (!err)
+ err = au_di_realloc(au_di(root), new_nbranch, /*may_shrink*/0);
+ if (!err) {
+ inode = d_inode(root);
+ err = au_hinode_realloc(au_ii(inode), new_nbranch,
+ /*may_shrink*/0);
+ }
+ if (!err)
+ return add_branch; /* success */
+
+out_wbr:
+ au_kfree_rcu(add_branch->br_wbr);
+out_hnotify:
+ au_hnotify_fin_br(add_branch);
+out_xino:
+ au_xino_put(add_branch);
+out_br:
+ au_kfree_rcu(add_branch);
+out:
+ return ERR_PTR(err);
+}
+
+/*
+ * test if the branch permission is legal or not.
+ */
+static int test_br(struct inode *inode, int brperm, char *path)
+{
+ int err;
+
+ err = (au_br_writable(brperm) && IS_RDONLY(inode));
+ if (!err)
+ goto out;
+
+ err = -EINVAL;
+ pr_err("write permission for readonly mount or inode, %s\n", path);
+
+out:
+ return err;
+}
+
+/*
+ * returns:
+ * 0: success, the caller will add it
+ * plus: success, it is already unified, the caller should ignore it
+ * minus: error
+ */
+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+ int err;
+ aufs_bindex_t bbot, bindex;
+ struct dentry *root, *h_dentry;
+ struct inode *inode, *h_inode;
+
+ root = sb->s_root;
+ bbot = au_sbbot(sb);
+ if (unlikely(bbot >= 0
+ && au_find_dbindex(root, add->path.dentry) >= 0)) {
+ err = 1;
+ if (!remount) {
+ err = -EINVAL;
+ pr_err("%s duplicated\n", add->pathname);
+ }
+ goto out;
+ }
+
+ err = -ENOSPC; /* -E2BIG; */
+ if (unlikely(AUFS_BRANCH_MAX <= add->bindex
+ || AUFS_BRANCH_MAX - 1 <= bbot)) {
+ pr_err("number of branches exceeded %s\n", add->pathname);
+ goto out;
+ }
+
+ err = -EDOM;
+ if (unlikely(add->bindex < 0 || bbot + 1 < add->bindex)) {
+ pr_err("bad index %d\n", add->bindex);
+ goto out;
+ }
+
+ inode = d_inode(add->path.dentry);
+ err = -ENOENT;
+ if (unlikely(!inode->i_nlink)) {
+ pr_err("no existence %s\n", add->pathname);
+ goto out;
+ }
+
+ err = -EINVAL;
+ if (unlikely(inode->i_sb == sb)) {
+ pr_err("%s must be outside\n", add->pathname);
+ goto out;
+ }
+
+ if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) {
+ pr_err("unsupported filesystem, %s (%s)\n",
+ add->pathname, au_sbtype(inode->i_sb));
+ goto out;
+ }
+
+ if (unlikely(inode->i_sb->s_stack_depth)) {
+ pr_err("already stacked, %s (%s)\n",
+ add->pathname, au_sbtype(inode->i_sb));
+ goto out;
+ }
+
+ err = test_br(d_inode(add->path.dentry), add->perm, add->pathname);
+ if (unlikely(err))
+ goto out;
+
+ if (bbot < 0)
+ return 0; /* success */
+
+ err = -EINVAL;
+ for (bindex = 0; bindex <= bbot; bindex++)
+ if (unlikely(test_overlap(sb, add->path.dentry,
+ au_h_dptr(root, bindex)))) {
+ pr_err("%s is overlapped\n", add->pathname);
+ goto out;
+ }
+
+ err = 0;
+ if (au_opt_test(au_mntflags(sb), WARN_PERM)) {
+ h_dentry = au_h_dptr(root, 0);
+ h_inode = d_inode(h_dentry);
+ if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO)
+ || !uid_eq(h_inode->i_uid, inode->i_uid)
+ || !gid_eq(h_inode->i_gid, inode->i_gid))
+ pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n",
+ add->pathname,
+ i_uid_read(inode), i_gid_read(inode),
+ (inode->i_mode & S_IALLUGO),
+ i_uid_read(h_inode), i_gid_read(h_inode),
+ (h_inode->i_mode & S_IALLUGO));
+ }
+
+out:
+ return err;
+}
+
+/*
+ * initialize or clean the whiteouts for an adding branch
+ */
+static int au_br_init_wh(struct super_block *sb, struct au_branch *br,
+ int new_perm)
+{
+ int err, old_perm;
+ aufs_bindex_t bindex;
+ struct inode *h_inode;
+ struct au_wbr *wbr;
+ struct au_hinode *hdir;
+ struct dentry *h_dentry;
+
+ err = vfsub_mnt_want_write(au_br_mnt(br));
+ if (unlikely(err))
+ goto out;
+
+ wbr = br->br_wbr;
+ old_perm = br->br_perm;
+ br->br_perm = new_perm;
+ hdir = NULL;
+ h_inode = NULL;
+ bindex = au_br_index(sb, br->br_id);
+ if (0 <= bindex) {
+ hdir = au_hi(d_inode(sb->s_root), bindex);
+ au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+ } else {
+ h_dentry = au_br_dentry(br);
+ h_inode = d_inode(h_dentry);
+ inode_lock_nested(h_inode, AuLsc_I_PARENT);
+ }
+ if (!wbr)
+ err = au_wh_init(br, sb);
+ else {
+ wbr_wh_write_lock(wbr);
+ err = au_wh_init(br, sb);
+ wbr_wh_write_unlock(wbr);
+ }
+ if (hdir)
+ au_hn_inode_unlock(hdir);
+ else
+ inode_unlock(h_inode);
+ vfsub_mnt_drop_write(au_br_mnt(br));
+ br->br_perm = old_perm;
+
+ if (!err && wbr && !au_br_writable(new_perm)) {
+ au_kfree_rcu(wbr);
+ br->br_wbr = NULL;
+ }
+
+out:
+ return err;
+}
+
+static int au_wbr_init(struct au_branch *br, struct super_block *sb,
+ int perm)
+{
+ int err;
+ struct kstatfs kst;
+ struct au_wbr *wbr;
+
+ wbr = br->br_wbr;
+ au_rw_init(&wbr->wbr_wh_rwsem);
+ atomic_set(&wbr->wbr_wh_running, 0);
+
+ /*
+ * a limit for rmdir/rename a dir
+ * cf. AUFS_MAX_NAMELEN in include/uapi/linux/aufs_type.h
+ */
+ err = vfs_statfs(&br->br_path, &kst);
+ if (unlikely(err))
+ goto out;
+ err = -EINVAL;
+ if (kst.f_namelen >= NAME_MAX)
+ err = au_br_init_wh(sb, br, perm);
+ else
+ pr_err("%pd(%s), unsupported namelen %ld\n",
+ au_br_dentry(br),
+ au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen);
+
+out:
+ return err;
+}
+
+/* initialize a new branch */
+static int au_br_init(struct au_branch *br, struct super_block *sb,
+ struct au_opt_add *add)
+{
+ int err;
+ struct au_branch *brbase;
+ struct file *xf;
+ struct inode *h_inode;
+
+ err = 0;
+ br->br_perm = add->perm;
+ br->br_path = add->path; /* set first, path_get() later */
+ spin_lock_init(&br->br_dykey_lock);
+ au_lcnt_init(&br->br_nfiles, /*release*/NULL);
+ au_lcnt_init(&br->br_count, /*release*/NULL);
+ br->br_id = au_new_br_id(sb);
+ AuDebugOn(br->br_id < 0);
+
+ /* always, regardless of the given option */
+ err = au_dr_br_init(sb, br, &add->path);
+ if (unlikely(err))
+ goto out_err;
+
+ if (au_br_writable(add->perm)) {
+ err = au_wbr_init(br, sb, add->perm);
+ if (unlikely(err))
+ goto out_err;
+ }
+
+ if (au_opt_test(au_mntflags(sb), XINO)) {
+ brbase = au_sbr(sb, 0);
+ xf = au_xino_file(brbase->br_xino, /*idx*/-1);
+ AuDebugOn(!xf);
+ h_inode = d_inode(add->path.dentry);
+ err = au_xino_init_br(sb, br, h_inode->i_ino, &xf->f_path);
+ if (unlikely(err)) {
+ AuDebugOn(au_xino_file(br->br_xino, /*idx*/-1));
+ goto out_err;
+ }
+ }
+
+ sysaufs_br_init(br);
+ path_get(&br->br_path);
+ goto out; /* success */
+
+out_err:
+ memset(&br->br_path, 0, sizeof(br->br_path));
+out:
+ return err;
+}
+
+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex,
+ struct au_branch *br, aufs_bindex_t bbot,
+ aufs_bindex_t amount)
+{
+ struct au_branch **brp;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ brp = sbinfo->si_branch + bindex;
+ memmove(brp + 1, brp, sizeof(*brp) * amount);
+ *brp = br;
+ sbinfo->si_bbot++;
+ if (unlikely(bbot < 0))
+ sbinfo->si_bbot = 0;
+}
+
+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex,
+ aufs_bindex_t bbot, aufs_bindex_t amount)
+{
+ struct au_hdentry *hdp;
+
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ hdp = au_hdentry(dinfo, bindex);
+ memmove(hdp + 1, hdp, sizeof(*hdp) * amount);
+ au_h_dentry_init(hdp);
+ dinfo->di_bbot++;
+ if (unlikely(bbot < 0))
+ dinfo->di_btop = 0;
+}
+
+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex,
+ aufs_bindex_t bbot, aufs_bindex_t amount)
+{
+ struct au_hinode *hip;
+
+ AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+ hip = au_hinode(iinfo, bindex);
+ memmove(hip + 1, hip, sizeof(*hip) * amount);
+ au_hinode_init(hip);
+ iinfo->ii_bbot++;
+ if (unlikely(bbot < 0))
+ iinfo->ii_btop = 0;
+}
+
+static void au_br_do_add(struct super_block *sb, struct au_branch *br,
+ aufs_bindex_t bindex)
+{
+ struct dentry *root, *h_dentry;
+ struct inode *root_inode, *h_inode;
+ aufs_bindex_t bbot, amount;
+
+ root = sb->s_root;
+ root_inode = d_inode(root);
+ bbot = au_sbbot(sb);
+ amount = bbot + 1 - bindex;
+ h_dentry = au_br_dentry(br);
+ au_sbilist_lock();
+ au_br_do_add_brp(au_sbi(sb), bindex, br, bbot, amount);
+ au_br_do_add_hdp(au_di(root), bindex, bbot, amount);
+ au_br_do_add_hip(au_ii(root_inode), bindex, bbot, amount);
+ au_set_h_dptr(root, bindex, dget(h_dentry));
+ h_inode = d_inode(h_dentry);
+ au_set_h_iptr(root_inode, bindex, au_igrab(h_inode), /*flags*/0);
+ au_sbilist_unlock();
+}
+
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+ int err;
+ aufs_bindex_t bbot, add_bindex;
+ struct dentry *root, *h_dentry;
+ struct inode *root_inode;
+ struct au_branch *add_branch;
+
+ root = sb->s_root;
+ root_inode = d_inode(root);
+ IMustLock(root_inode);
+ IiMustWriteLock(root_inode);
+ err = test_add(sb, add, remount);
+ if (unlikely(err < 0))
+ goto out;
+ if (err) {
+ err = 0;
+ goto out; /* success */
+ }
+
+ bbot = au_sbbot(sb);
+ add_branch = au_br_alloc(sb, bbot + 2, add->perm);
+ err = PTR_ERR(add_branch);
+ if (IS_ERR(add_branch))
+ goto out;
+
+ err = au_br_init(add_branch, sb, add);
+ if (unlikely(err)) {
+ au_br_do_free(add_branch);
+ goto out;
+ }
+
+ add_bindex = add->bindex;
+ sysaufs_brs_del(sb, add_bindex); /* remove successors */
+ au_br_do_add(sb, add_branch, add_bindex);
+ sysaufs_brs_add(sb, add_bindex); /* append successors */
+ dbgaufs_brs_add(sb, add_bindex, /*topdown*/0); /* rename successors */
+
+ h_dentry = add->path.dentry;
+ if (!add_bindex) {
+ au_cpup_attr_all(root_inode, /*force*/1);
+ sb->s_maxbytes = h_dentry->d_sb->s_maxbytes;
+ } else
+ au_add_nlink(root_inode, d_inode(h_dentry));
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static unsigned long long au_farray_cb(struct super_block *sb, void *a,
+ unsigned long long max __maybe_unused,
+ void *arg)
+{
+ unsigned long long n;
+ struct file **p, *f;
+ struct hlist_bl_head *files;
+ struct hlist_bl_node *pos;
+ struct au_finfo *finfo;
+
+ n = 0;
+ p = a;
+ files = &au_sbi(sb)->si_files;
+ hlist_bl_lock(files);
+ hlist_bl_for_each_entry(finfo, pos, files, fi_hlist) {
+ f = finfo->fi_file;
+ if (file_count(f)
+ && !special_file(file_inode(f)->i_mode)) {
+ get_file(f);
+ *p++ = f;
+ n++;
+ AuDebugOn(n > max);
+ }
+ }
+ hlist_bl_unlock(files);
+
+ return n;
+}
+
+static struct file **au_farray_alloc(struct super_block *sb,
+ unsigned long long *max)
+{
+ struct au_sbinfo *sbi;
+
+ sbi = au_sbi(sb);
+ *max = au_lcnt_read(&sbi->si_nfiles, /*do_rev*/1);
+ return au_array_alloc(max, au_farray_cb, sb, /*arg*/NULL);
+}
+
+static void au_farray_free(struct file **a, unsigned long long max)
+{
+ unsigned long long ull;
+
+ for (ull = 0; ull < max; ull++)
+ if (a[ull])
+ fput(a[ull]);
+ kvfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * delete a branch
+ */
+
+ /* to show the line number, do not make it an inline function */
+#define AuVerbose(do_info, fmt, ...) do { \
+ if (do_info) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static int au_test_ibusy(struct inode *inode, aufs_bindex_t btop,
+ aufs_bindex_t bbot)
+{
+ return (inode && !S_ISDIR(inode->i_mode)) || btop == bbot;
+}
+
+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t btop,
+ aufs_bindex_t bbot)
+{
+ return au_test_ibusy(d_inode(dentry), btop, bbot);
+}
+
+/*
+ * test if the branch is deletable or not.
+ */
+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex,
+ unsigned int sigen, const unsigned int verbose)
+{
+ int err, i, j, ndentry;
+ aufs_bindex_t btop, bbot;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry *d;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, root, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ for (i = 0; !err && i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ ndentry = dpage->ndentry;
+ for (j = 0; !err && j < ndentry; j++) {
+ d = dpage->dentries[j];
+ AuDebugOn(au_dcount(d) <= 0);
+ if (!au_digen_test(d, sigen)) {
+ di_read_lock_child(d, AuLock_IR);
+ if (unlikely(au_dbrange_test(d))) {
+ di_read_unlock(d, AuLock_IR);
+ continue;
+ }
+ } else {
+ di_write_lock_child(d);
+ if (unlikely(au_dbrange_test(d))) {
+ di_write_unlock(d);
+ continue;
+ }
+ err = au_reval_dpath(d, sigen);
+ if (!err)
+ di_downgrade_lock(d, AuLock_IR);
+ else {
+ di_write_unlock(d);
+ break;
+ }
+ }
+
+ /* AuDbgDentry(d); */
+ btop = au_dbtop(d);
+ bbot = au_dbbot(d);
+ if (btop <= bindex
+ && bindex <= bbot
+ && au_h_dptr(d, bindex)
+ && au_test_dbusy(d, btop, bbot)) {
+ err = -EBUSY;
+ AuVerbose(verbose, "busy %pd\n", d);
+ AuDbgDentry(d);
+ }
+ di_read_unlock(d, AuLock_IR);
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex,
+ unsigned int sigen, const unsigned int verbose)
+{
+ int err;
+ unsigned long long max, ull;
+ struct inode *i, **array;
+ aufs_bindex_t btop, bbot;
+
+ array = au_iarray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ err = 0;
+ AuDbg("b%d\n", bindex);
+ for (ull = 0; !err && ull < max; ull++) {
+ i = array[ull];
+ if (unlikely(!i))
+ break;
+ if (i->i_ino == AUFS_ROOT_INO)
+ continue;
+
+ /* AuDbgInode(i); */
+ if (au_iigen(i, NULL) == sigen)
+ ii_read_lock_child(i);
+ else {
+ ii_write_lock_child(i);
+ err = au_refresh_hinode_self(i);
+ au_iigen_dec(i);
+ if (!err)
+ ii_downgrade_lock(i);
+ else {
+ ii_write_unlock(i);
+ break;
+ }
+ }
+
+ btop = au_ibtop(i);
+ bbot = au_ibbot(i);
+ if (btop <= bindex
+ && bindex <= bbot
+ && au_h_iptr(i, bindex)
+ && au_test_ibusy(i, btop, bbot)) {
+ err = -EBUSY;
+ AuVerbose(verbose, "busy i%lu\n", i->i_ino);
+ AuDbgInode(i);
+ }
+ ii_read_unlock(i);
+ }
+ au_iarray_free(array, max);
+
+out:
+ return err;
+}
+
+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex,
+ const unsigned int verbose)
+{
+ int err;
+ unsigned int sigen;
+
+ sigen = au_sigen(root->d_sb);
+ DiMustNoWaiters(root);
+ IiMustNoWaiters(d_inode(root));
+ di_write_unlock(root);
+ err = test_dentry_busy(root, bindex, sigen, verbose);
+ if (!err)
+ err = test_inode_busy(root->d_sb, bindex, sigen, verbose);
+ di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */
+
+ return err;
+}
+
+static int test_dir_busy(struct file *file, aufs_bindex_t br_id,
+ struct file **to_free, int *idx)
+{
+ int err;
+ unsigned char matched, root;
+ aufs_bindex_t bindex, bbot;
+ struct au_fidir *fidir;
+ struct au_hfile *hfile;
+
+ err = 0;
+ root = IS_ROOT(file->f_path.dentry);
+ if (root) {
+ get_file(file);
+ to_free[*idx] = file;
+ (*idx)++;
+ goto out;
+ }
+
+ matched = 0;
+ fidir = au_fi(file)->fi_hdir;
+ AuDebugOn(!fidir);
+ bbot = au_fbbot_dir(file);
+ for (bindex = au_fbtop(file); bindex <= bbot; bindex++) {
+ hfile = fidir->fd_hfile + bindex;
+ if (!hfile->hf_file)
+ continue;
+
+ if (hfile->hf_br->br_id == br_id) {
+ matched = 1;
+ break;
+ }
+ }
+ if (matched)
+ err = -EBUSY;
+
+out:
+ return err;
+}
+
+static int test_file_busy(struct super_block *sb, aufs_bindex_t br_id,
+ struct file **to_free, int opened)
+{
+ int err, idx;
+ unsigned long long ull, max;
+ aufs_bindex_t btop;
+ struct file *file, **array;
+ struct dentry *root;
+ struct au_hfile *hfile;
+
+ array = au_farray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ err = 0;
+ idx = 0;
+ root = sb->s_root;
+ di_write_unlock(root);
+ for (ull = 0; ull < max; ull++) {
+ file = array[ull];
+ if (unlikely(!file))
+ break;
+
+ /* AuDbg("%pD\n", file); */
+ fi_read_lock(file);
+ btop = au_fbtop(file);
+ if (!d_is_dir(file->f_path.dentry)) {
+ hfile = &au_fi(file)->fi_htop;
+ if (hfile->hf_br->br_id == br_id)
+ err = -EBUSY;
+ } else
+ err = test_dir_busy(file, br_id, to_free, &idx);
+ fi_read_unlock(file);
+ if (unlikely(err))
+ break;
+ }
+ di_write_lock_child(root);
+ au_farray_free(array, max);
+ AuDebugOn(idx > opened);
+
+out:
+ return err;
+}
+
+static void br_del_file(struct file **to_free, unsigned long long opened,
+ aufs_bindex_t br_id)
+{
+ unsigned long long ull;
+ aufs_bindex_t bindex, btop, bbot, bfound;
+ struct file *file;
+ struct au_fidir *fidir;
+ struct au_hfile *hfile;
+
+ for (ull = 0; ull < opened; ull++) {
+ file = to_free[ull];
+ if (unlikely(!file))
+ break;
+
+ /* AuDbg("%pD\n", file); */
+ AuDebugOn(!d_is_dir(file->f_path.dentry));
+ bfound = -1;
+ fidir = au_fi(file)->fi_hdir;
+ AuDebugOn(!fidir);
+ fi_write_lock(file);
+ btop = au_fbtop(file);
+ bbot = au_fbbot_dir(file);
+ for (bindex = btop; bindex <= bbot; bindex++) {
+ hfile = fidir->fd_hfile + bindex;
+ if (!hfile->hf_file)
+ continue;
+
+ if (hfile->hf_br->br_id == br_id) {
+ bfound = bindex;
+ break;
+ }
+ }
+ AuDebugOn(bfound < 0);
+ au_set_h_fptr(file, bfound, NULL);
+ if (bfound == btop) {
+ for (btop++; btop <= bbot; btop++)
+ if (au_hf_dir(file, btop)) {
+ au_set_fbtop(file, btop);
+ break;
+ }
+ }
+ fi_write_unlock(file);
+ }
+}
+
+static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
+ const aufs_bindex_t bindex,
+ const aufs_bindex_t bbot)
+{
+ struct au_branch **brp, **p;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ brp = sbinfo->si_branch + bindex;
+ if (bindex < bbot)
+ memmove(brp, brp + 1, sizeof(*brp) * (bbot - bindex));
+ sbinfo->si_branch[0 + bbot] = NULL;
+ sbinfo->si_bbot--;
+
+ p = au_krealloc(sbinfo->si_branch, sizeof(*p) * bbot, AuGFP_SBILIST,
+ /*may_shrink*/1);
+ if (p)
+ sbinfo->si_branch = p;
+ /* harmless error */
+}
+
+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
+ const aufs_bindex_t bbot)
+{
+ struct au_hdentry *hdp, *p;
+
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ hdp = au_hdentry(dinfo, bindex);
+ if (bindex < bbot)
+ memmove(hdp, hdp + 1, sizeof(*hdp) * (bbot - bindex));
+ /* au_h_dentry_init(au_hdentry(dinfo, bbot); */
+ dinfo->di_bbot--;
+
+ p = au_krealloc(dinfo->di_hdentry, sizeof(*p) * bbot, AuGFP_SBILIST,
+ /*may_shrink*/1);
+ if (p)
+ dinfo->di_hdentry = p;
+ /* harmless error */
+}
+
+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
+ const aufs_bindex_t bbot)
+{
+ struct au_hinode *hip, *p;
+
+ AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+ hip = au_hinode(iinfo, bindex);
+ if (bindex < bbot)
+ memmove(hip, hip + 1, sizeof(*hip) * (bbot - bindex));
+ /* au_hinode_init(au_hinode(iinfo, bbot)); */
+ iinfo->ii_bbot--;
+
+ p = au_krealloc(iinfo->ii_hinode, sizeof(*p) * bbot, AuGFP_SBILIST,
+ /*may_shrink*/1);
+ if (p)
+ iinfo->ii_hinode = p;
+ /* harmless error */
+}
+
+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex,
+ struct au_branch *br)
+{
+ aufs_bindex_t bbot;
+ struct au_sbinfo *sbinfo;
+ struct dentry *root, *h_root;
+ struct inode *inode, *h_inode;
+ struct au_hinode *hinode;
+
+ SiMustWriteLock(sb);
+
+ root = sb->s_root;
+ inode = d_inode(root);
+ sbinfo = au_sbi(sb);
+ bbot = sbinfo->si_bbot;
+
+ h_root = au_h_dptr(root, bindex);
+ hinode = au_hi(inode, bindex);
+ h_inode = au_igrab(hinode->hi_inode);
+ au_hiput(hinode);
+
+ au_sbilist_lock();
+ au_br_do_del_brp(sbinfo, bindex, bbot);
+ au_br_do_del_hdp(au_di(root), bindex, bbot);
+ au_br_do_del_hip(au_ii(inode), bindex, bbot);
+ au_sbilist_unlock();
+
+ /* ignore an error */
+ au_dr_br_fin(sb, br); /* always, regardless of the mount option */
+
+ dput(h_root);
+ iput(h_inode);
+ au_br_do_free(br);
+}
+
+static unsigned long long empty_cb(struct super_block *sb, void *array,
+ unsigned long long max, void *arg)
+{
+ return max;
+}
+
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount)
+{
+ int err, rerr, i;
+ unsigned long long opened;
+ unsigned int mnt_flags;
+ aufs_bindex_t bindex, bbot, br_id;
+ unsigned char do_wh, verbose;
+ struct au_branch *br;
+ struct au_wbr *wbr;
+ struct dentry *root;
+ struct file **to_free;
+
+ err = 0;
+ opened = 0;
+ to_free = NULL;
+ root = sb->s_root;
+ bindex = au_find_dbindex(root, del->h_path.dentry);
+ if (bindex < 0) {
+ if (remount)
+ goto out; /* success */
+ err = -ENOENT;
+ pr_err("%s no such branch\n", del->pathname);
+ goto out;
+ }
+ AuDbg("bindex b%d\n", bindex);
+
+ err = -EBUSY;
+ mnt_flags = au_mntflags(sb);
+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
+ bbot = au_sbbot(sb);
+ if (unlikely(!bbot)) {
+ AuVerbose(verbose, "no more branches left\n");
+ goto out;
+ }
+
+ br = au_sbr(sb, bindex);
+ AuDebugOn(!path_equal(&br->br_path, &del->h_path));
+ if (unlikely(au_lcnt_read(&br->br_count, /*do_rev*/1))) {
+ AuVerbose(verbose, "br %pd2 is busy now\n", del->h_path.dentry);
+ goto out;
+ }
+
+ br_id = br->br_id;
+ opened = au_lcnt_read(&br->br_nfiles, /*do_rev*/1);
+ if (unlikely(opened)) {
+ to_free = au_array_alloc(&opened, empty_cb, sb, NULL);
+ err = PTR_ERR(to_free);
+ if (IS_ERR(to_free))
+ goto out;
+
+ err = test_file_busy(sb, br_id, to_free, opened);
+ if (unlikely(err)) {
+ AuVerbose(verbose, "%llu file(s) opened\n", opened);
+ goto out;
+ }
+ }
+
+ wbr = br->br_wbr;
+ do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph);
+ if (do_wh) {
+ /* instead of WbrWhMustWriteLock(wbr) */
+ SiMustWriteLock(sb);
+ for (i = 0; i < AuBrWh_Last; i++) {
+ dput(wbr->wbr_wh[i]);
+ wbr->wbr_wh[i] = NULL;
+ }
+ }
+
+ err = test_children_busy(root, bindex, verbose);
+ if (unlikely(err)) {
+ if (do_wh)
+ goto out_wh;
+ goto out;
+ }
+
+ err = 0;
+ if (to_free) {
+ /*
+ * now we confirmed the branch is deletable.
+ * let's free the remaining opened dirs on the branch.
+ */
+ di_write_unlock(root);
+ br_del_file(to_free, opened, br_id);
+ di_write_lock_child(root);
+ }
+
+ sysaufs_brs_del(sb, bindex); /* remove successors */
+ dbgaufs_xino_del(br); /* remove one */
+ au_br_do_del(sb, bindex, br);
+ sysaufs_brs_add(sb, bindex); /* append successors */
+ dbgaufs_brs_add(sb, bindex, /*topdown*/1); /* rename successors */
+
+ if (!bindex) {
+ au_cpup_attr_all(d_inode(root), /*force*/1);
+ sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes;
+ } else
+ au_sub_nlink(d_inode(root), d_inode(del->h_path.dentry));
+ if (au_opt_test(mnt_flags, PLINK))
+ au_plink_half_refresh(sb, br_id);
+
+ goto out; /* success */
+
+out_wh:
+ /* revert */
+ rerr = au_br_init_wh(sb, br, br->br_perm);
+ if (rerr)
+ pr_warn("failed re-creating base whiteout, %s. (%d)\n",
+ del->pathname, rerr);
+out:
+ if (to_free)
+ au_farray_free(to_free, opened);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg)
+{
+ int err;
+ aufs_bindex_t btop, bbot;
+ struct aufs_ibusy ibusy;
+ struct inode *inode, *h_inode;
+
+ err = -EPERM;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = copy_from_user(&ibusy, arg, sizeof(ibusy));
+ if (!err)
+ err = !access_ok(&arg->h_ino, sizeof(arg->h_ino));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+
+ err = -EINVAL;
+ si_read_lock(sb, AuLock_FLUSH);
+ if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbbot(sb)))
+ goto out_unlock;
+
+ err = 0;
+ ibusy.h_ino = 0; /* invalid */
+ inode = ilookup(sb, ibusy.ino);
+ if (!inode
+ || inode->i_ino == AUFS_ROOT_INO
+ || au_is_bad_inode(inode))
+ goto out_unlock;
+
+ ii_read_lock_child(inode);
+ btop = au_ibtop(inode);
+ bbot = au_ibbot(inode);
+ if (btop <= ibusy.bindex && ibusy.bindex <= bbot) {
+ h_inode = au_h_iptr(inode, ibusy.bindex);
+ if (h_inode && au_test_ibusy(inode, btop, bbot))
+ ibusy.h_ino = h_inode->i_ino;
+ }
+ ii_read_unlock(inode);
+ iput(inode);
+
+out_unlock:
+ si_read_unlock(sb);
+ if (!err) {
+ err = __put_user(ibusy.h_ino, &arg->h_ino);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ }
+ }
+out:
+ return err;
+}
+
+long au_ibusy_ioctl(struct file *file, unsigned long arg)
+{
+ return au_ibusy(file->f_path.dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg)
+{
+ return au_ibusy(file->f_path.dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * change a branch permission
+ */
+
+static void au_warn_ima(void)
+{
+#ifdef CONFIG_IMA
+ /* since it doesn't support mark_files_ro() */
+ AuWarn1("RW -> RO makes IMA to produce wrong message\n");
+#endif
+}
+
+static int do_need_sigen_inc(int a, int b)
+{
+ return au_br_whable(a) && !au_br_whable(b);
+}
+
+static int need_sigen_inc(int old, int new)
+{
+ return do_need_sigen_inc(old, new)
+ || do_need_sigen_inc(new, old);
+}
+
+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex)
+{
+ int err, do_warn;
+ unsigned int mnt_flags;
+ unsigned long long ull, max;
+ aufs_bindex_t br_id;
+ unsigned char verbose, writer;
+ struct file *file, *hf, **array;
+ struct au_hfile *hfile;
+
+ mnt_flags = au_mntflags(sb);
+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
+
+ array = au_farray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ do_warn = 0;
+ br_id = au_sbr_id(sb, bindex);
+ for (ull = 0; ull < max; ull++) {
+ file = array[ull];
+ if (unlikely(!file))
+ break;
+
+ /* AuDbg("%pD\n", file); */
+ fi_read_lock(file);
+ if (unlikely(au_test_mmapped(file))) {
+ err = -EBUSY;
+ AuVerbose(verbose, "mmapped %pD\n", file);
+ AuDbgFile(file);
+ FiMustNoWaiters(file);
+ fi_read_unlock(file);
+ goto out_array;
+ }
+
+ hfile = &au_fi(file)->fi_htop;
+ hf = hfile->hf_file;
+ if (!d_is_reg(file->f_path.dentry)
+ || !(file->f_mode & FMODE_WRITE)
+ || hfile->hf_br->br_id != br_id
+ || !(hf->f_mode & FMODE_WRITE))
+ array[ull] = NULL;
+ else {
+ do_warn = 1;
+ get_file(file);
+ }
+
+ FiMustNoWaiters(file);
+ fi_read_unlock(file);
+ fput(file);
+ }
+
+ err = 0;
+ if (do_warn)
+ au_warn_ima();
+
+ for (ull = 0; ull < max; ull++) {
+ file = array[ull];
+ if (!file)
+ continue;
+
+ /* todo: already flushed? */
+ /*
+ * fs/super.c:mark_files_ro() is gone, but aufs keeps its
+ * approach which resets f_mode and calls mnt_drop_write() and
+ * file_release_write() for each file, because the branch
+ * attribute in the aufs world is totally different from the native
+ * fs rw/ro mode.
+ */
+ /* fi_read_lock(file); */
+ hfile = &au_fi(file)->fi_htop;
+ hf = hfile->hf_file;
+ /* fi_read_unlock(file); */
+ spin_lock(&hf->f_lock);
+ writer = !!(hf->f_mode & FMODE_WRITER);
+ hf->f_mode &= ~(FMODE_WRITE | FMODE_WRITER);
+ spin_unlock(&hf->f_lock);
+ if (writer) {
+ put_write_access(file_inode(hf));
+ __mnt_drop_write(hf->f_path.mnt);
+ }
+ }
+
+out_array:
+ au_farray_free(array, max);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+ int *do_refresh)
+{
+ int err, rerr;
+ aufs_bindex_t bindex;
+ struct dentry *root;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ root = sb->s_root;
+ bindex = au_find_dbindex(root, mod->h_root);
+ if (bindex < 0) {
+ if (remount)
+ return 0; /* success */
+ err = -ENOENT;
+ pr_err("%s no such branch\n", mod->path);
+ goto out;
+ }
+ AuDbg("bindex b%d\n", bindex);
+
+ err = test_br(d_inode(mod->h_root), mod->perm, mod->path);
+ if (unlikely(err))
+ goto out;
+
+ br = au_sbr(sb, bindex);
+ AuDebugOn(mod->h_root != au_br_dentry(br));
+ if (br->br_perm == mod->perm)
+ return 0; /* success */
+
+ /* pre-allocate for non-fhsm --> fhsm */
+ bf = NULL;
+ if (!au_br_fhsm(br->br_perm) && au_br_fhsm(mod->perm)) {
+ err = au_fhsm_br_alloc(br);
+ if (unlikely(err))
+ goto out;
+ bf = br->br_fhsm;
+ br->br_fhsm = NULL;
+ }
+
+ if (au_br_writable(br->br_perm)) {
+ /* remove whiteout base */
+ err = au_br_init_wh(sb, br, mod->perm);
+ if (unlikely(err))
+ goto out_bf;
+
+ if (!au_br_writable(mod->perm)) {
+ /* rw --> ro, file might be mmapped */
+ DiMustNoWaiters(root);
+ IiMustNoWaiters(d_inode(root));
+ di_write_unlock(root);
+ err = au_br_mod_files_ro(sb, bindex);
+ /* aufs_write_lock() calls ..._child() */
+ di_write_lock_child(root);
+
+ if (unlikely(err)) {
+ rerr = -ENOMEM;
+ br->br_wbr = kzalloc(sizeof(*br->br_wbr),
+ GFP_NOFS);
+ if (br->br_wbr)
+ rerr = au_wbr_init(br, sb, br->br_perm);
+ if (unlikely(rerr)) {
+ AuIOErr("nested error %d (%d)\n",
+ rerr, err);
+ br->br_perm = mod->perm;
+ }
+ }
+ }
+ } else if (au_br_writable(mod->perm)) {
+ /* ro --> rw */
+ err = -ENOMEM;
+ br->br_wbr = kzalloc(sizeof(*br->br_wbr), GFP_NOFS);
+ if (br->br_wbr) {
+ err = au_wbr_init(br, sb, mod->perm);
+ if (unlikely(err)) {
+ au_kfree_rcu(br->br_wbr);
+ br->br_wbr = NULL;
+ }
+ }
+ }
+ if (unlikely(err))
+ goto out_bf;
+
+ if (au_br_fhsm(br->br_perm)) {
+ if (!au_br_fhsm(mod->perm)) {
+ /* fhsm --> non-fhsm */
+ au_br_fhsm_fin(br->br_fhsm);
+ au_kfree_rcu(br->br_fhsm);
+ br->br_fhsm = NULL;
+ }
+ } else if (au_br_fhsm(mod->perm))
+ /* non-fhsm --> fhsm */
+ br->br_fhsm = bf;
+
+ *do_refresh |= need_sigen_inc(br->br_perm, mod->perm);
+ br->br_perm = mod->perm;
+ goto out; /* success */
+
+out_bf:
+ au_kfree_try_rcu(bf);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs)
+{
+ int err;
+ struct kstatfs kstfs;
+
+ err = vfs_statfs(&br->br_path, &kstfs);
+ if (!err) {
+ stfs->f_blocks = kstfs.f_blocks;
+ stfs->f_bavail = kstfs.f_bavail;
+ stfs->f_files = kstfs.f_files;
+ stfs->f_ffree = kstfs.f_ffree;
+ }
+
+ return err;
+}
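The au_ibusy() ioctl above follows the usual copy-in/copy-out discipline: copy the whole user argument in with copy_from_user(), do the work under the superblock and inode locks, then write only the result field back to userspace. A stripped-down sketch of that shape is shown below; the struct and function names are made up, it assumes kernel context, and it is not part of this patch.

#include <linux/uaccess.h>

struct example_arg {
	long in;
	long out;
};

static long example_ioctl(struct example_arg __user *uarg)
{
	struct example_arg a;

	if (copy_from_user(&a, uarg, sizeof(a)))
		return -EFAULT;		/* bad user pointer */

	a.out = a.in + 1;		/* the real handler does its work here */

	if (put_user(a.out, &uarg->out))
		return -EFAULT;		/* copy only the result back */
	return 0;
}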
diff --git a/fs/aufs/branch.h b/fs/aufs/branch.h
new file mode 100644
index 000000000000..f99063aa54cd
--- /dev/null
+++ b/fs/aufs/branch.h
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * branch filesystems and xino for them
+ */
+
+#ifndef __AUFS_BRANCH_H__
+#define __AUFS_BRANCH_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mount.h>
+#include "dirren.h"
+#include "dynop.h"
+#include "lcnt.h"
+#include "rwsem.h"
+#include "super.h"
+
+/* ---------------------------------------------------------------------- */
+
+/* a xino file */
+struct au_xino {
+ struct file **xi_file;
+ unsigned int xi_nfile;
+
+ struct {
+ spinlock_t spin;
+ ino_t *array;
+ int total;
+ /* reserved for future use */
+ /* unsigned long *bitmap; */
+ wait_queue_head_t wqh;
+ } xi_nondir;
+
+ struct mutex xi_mtx; /* protects xi_file array */
+ struct hlist_bl_head xi_writing;
+
+ atomic_t xi_truncating;
+
+ struct kref xi_kref;
+};
+
+/* File-based Hierarchical Storage Management */
+struct au_br_fhsm {
+#ifdef CONFIG_AUFS_FHSM
+ struct mutex bf_lock;
+ unsigned long bf_jiffy;
+ struct aufs_stfs bf_stfs;
+ int bf_readable;
+#endif
+};
+
+/* members for writable branch only */
+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last};
+struct au_wbr {
+ struct au_rwsem wbr_wh_rwsem;
+ struct dentry *wbr_wh[AuBrWh_Last];
+ atomic_t wbr_wh_running;
+#define wbr_whbase wbr_wh[AuBrWh_BASE] /* whiteout base */
+#define wbr_plink wbr_wh[AuBrWh_PLINK] /* pseudo-link dir */
+#define wbr_orph wbr_wh[AuBrWh_ORPH] /* dir for orphans */
+
+ /* mfs mode */
+ unsigned long long wbr_bytes;
+};
+
+/* ext2 has 3 types of operations at least, ext3 has 4 */
+#define AuBrDynOp (AuDyLast * 4)
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+/* support for asynchronous destruction */
+struct au_br_hfsnotify {
+ struct fsnotify_group *hfsn_group;
+};
+#endif
+
+/* sysfs entries */
+struct au_brsysfs {
+ char name[16];
+ struct attribute attr;
+};
+
+enum {
+ AuBrSysfs_BR,
+ AuBrSysfs_BRID,
+ AuBrSysfs_Last
+};
+
+/* protected by superblock rwsem */
+struct au_branch {
+ struct au_xino *br_xino;
+
+ aufs_bindex_t br_id;
+
+ int br_perm;
+ struct path br_path;
+ spinlock_t br_dykey_lock;
+ struct au_dykey *br_dykey[AuBrDynOp];
+ au_lcnt_t br_nfiles; /* opened files */
+ au_lcnt_t br_count; /* in-use for other */
+
+ struct au_wbr *br_wbr;
+ struct au_br_fhsm *br_fhsm;
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+ struct au_br_hfsnotify *br_hfsn;
+#endif
+
+#ifdef CONFIG_SYSFS
+ /* entries under sysfs per mount-point */
+ struct au_brsysfs br_sysfs[AuBrSysfs_Last];
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *br_dbgaufs; /* xino */
+#endif
+
+ struct au_dr_br br_dirren;
+};
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct vfsmount *au_br_mnt(struct au_branch *br)
+{
+ return br->br_path.mnt;
+}
+
+static inline struct dentry *au_br_dentry(struct au_branch *br)
+{
+ return br->br_path.dentry;
+}
+
+static inline struct super_block *au_br_sb(struct au_branch *br)
+{
+ return au_br_mnt(br)->mnt_sb;
+}
+
+static inline int au_br_rdonly(struct au_branch *br)
+{
+ return (sb_rdonly(au_br_sb(br))
+ || !au_br_writable(br->br_perm))
+ ? -EROFS : 0;
+}
+
+static inline int au_br_hnotifyable(int brperm __maybe_unused)
+{
+#ifdef CONFIG_AUFS_HNOTIFY
+ return !(brperm & AuBrPerm_RR);
+#else
+ return 0;
+#endif
+}
+
+static inline int au_br_test_oflag(int oflag, struct au_branch *br)
+{
+ int err, exec_flag;
+
+ err = 0;
+ exec_flag = oflag & __FMODE_EXEC;
+ if (unlikely(exec_flag && path_noexec(&br->br_path)))
+ err = -EACCES;
+
+ return err;
+}
+
+static inline void au_xino_get(struct au_branch *br)
+{
+ struct au_xino *xi;
+
+ xi = br->br_xino;
+ if (xi)
+ kref_get(&xi->xi_kref);
+}
+
+static inline int au_xino_count(struct au_branch *br)
+{
+ int v;
+ struct au_xino *xi;
+
+ v = 0;
+ xi = br->br_xino;
+ if (xi)
+ v = kref_read(&xi->xi_kref);
+
+ return v;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* branch.c */
+struct au_sbinfo;
+void au_br_free(struct au_sbinfo *sinfo);
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id);
+struct au_opt_add;
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount);
+struct au_opt_del;
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount);
+long au_ibusy_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+struct au_opt_mod;
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+ int *do_refresh);
+struct aufs_stfs;
+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs);
+
+/* xino.c */
+static const loff_t au_loff_max = LLONG_MAX;
+
+aufs_bindex_t au_xi_root(struct super_block *sb, struct dentry *dentry);
+struct file *au_xino_create(struct super_block *sb, char *fpath, int silent);
+struct file *au_xino_create2(struct super_block *sb, struct path *base,
+ struct file *copy_src);
+struct au_xi_new {
+ struct au_xino *xi; /* switch between xino and xigen */
+ int idx;
+ struct path *base;
+ struct file *copy_src;
+};
+struct file *au_xi_new(struct super_block *sb, struct au_xi_new *xinew);
+
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t *ino);
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t ino);
+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *buf, size_t size,
+ loff_t *pos);
+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos);
+
+int au_xib_trunc(struct super_block *sb);
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex, int idx_begin);
+
+struct au_xino *au_xino_alloc(unsigned int nfile);
+int au_xino_put(struct au_branch *br);
+struct file *au_xino_file1(struct au_xino *xi);
+
+struct au_opt_xino;
+void au_xino_clr(struct super_block *sb);
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xiopt, int remount);
+struct file *au_xino_def(struct super_block *sb);
+int au_xino_init_br(struct super_block *sb, struct au_branch *br, ino_t hino,
+ struct path *base);
+
+ino_t au_xino_new_ino(struct super_block *sb);
+void au_xino_delete_inode(struct inode *inode, const int unlinked);
+
+void au_xinondir_leave(struct super_block *sb, aufs_bindex_t bindex,
+ ino_t h_ino, int idx);
+int au_xinondir_enter(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ int *idx);
+
+int au_xino_path(struct seq_file *seq, struct file *file);
+
+/* ---------------------------------------------------------------------- */
+
+/* @idx is signed to accept -1 meaning the first file */
+static inline struct file *au_xino_file(struct au_xino *xi, int idx)
+{
+ struct file *file;
+
+ file = NULL;
+ if (!xi)
+ goto out;
+
+ if (idx >= 0) {
+ if (idx < xi->xi_nfile)
+ file = xi->xi_file[idx];
+ } else
+ file = au_xino_file1(xi);
+
+out:
+ return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Superblock to branch */
+static inline
+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_sbr(sb, bindex)->br_id;
+}
+
+static inline
+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_br_mnt(au_sbr(sb, bindex));
+}
+
+static inline
+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_br_sb(au_sbr(sb, bindex));
+}
+
+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_sbr(sb, bindex)->br_perm;
+}
+
+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_br_whable(au_sbr_perm(sb, bindex));
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define wbr_wh_read_lock(wbr) au_rw_read_lock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_lock(wbr) au_rw_write_lock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_read_trylock(wbr) au_rw_read_trylock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_trylock(wbr) au_rw_write_trylock(&(wbr)->wbr_wh_rwsem)
+/*
+#define wbr_wh_read_trylock_nested(wbr) \
+ au_rw_read_trylock_nested(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_trylock_nested(wbr) \
+ au_rw_write_trylock_nested(&(wbr)->wbr_wh_rwsem)
+*/
+
+#define wbr_wh_read_unlock(wbr) au_rw_read_unlock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_unlock(wbr) au_rw_write_unlock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_downgrade_lock(wbr) au_rw_dgrade_lock(&(wbr)->wbr_wh_rwsem)
+
+#define WbrWhMustNoWaiters(wbr) AuRwMustNoWaiters(&(wbr)->wbr_wh_rwsem)
+#define WbrWhMustAnyLock(wbr) AuRwMustAnyLock(&(wbr)->wbr_wh_rwsem)
+#define WbrWhMustWriteLock(wbr) AuRwMustWriteLock(&(wbr)->wbr_wh_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_FHSM
+static inline void au_br_fhsm_init(struct au_br_fhsm *brfhsm)
+{
+ mutex_init(&brfhsm->bf_lock);
+ brfhsm->bf_jiffy = 0;
+ brfhsm->bf_readable = 0;
+}
+
+static inline void au_br_fhsm_fin(struct au_br_fhsm *brfhsm)
+{
+ mutex_destroy(&brfhsm->bf_lock);
+}
+#else
+AuStubVoid(au_br_fhsm_init, struct au_br_fhsm *brfhsm)
+AuStubVoid(au_br_fhsm_fin, struct au_br_fhsm *brfhsm)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_BRANCH_H__ */
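The br_xino lifetime in branch.h above is reference counted with a plain kref: au_xino_get() takes a reference, au_xino_count() reads it for debugging, and the matching au_xino_put() lives in xino.c outside this hunk (presumably dropping the reference and freeing from a release callback). The generic shape of that pattern, with made-up names, is:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref ref;
	/* payload ... */
};

static void example_release(struct kref *kref)
{
	struct example_obj *o = container_of(kref, struct example_obj, ref);

	kfree(o);
}

static void example_get(struct example_obj *o)
{
	kref_get(&o->ref);
}

static int example_put(struct example_obj *o)
{
	/* returns 1 when the last reference was dropped and o was freed */
	return kref_put(&o->ref, example_release);
}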
diff --git a/fs/aufs/conf.mk b/fs/aufs/conf.mk
new file mode 100644
index 000000000000..12782f8e0f38
--- /dev/null
+++ b/fs/aufs/conf.mk
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+
+AuConfStr = CONFIG_AUFS_FS=${CONFIG_AUFS_FS}
+
+define AuConf
+ifdef ${1}
+AuConfStr += ${1}=${${1}}
+endif
+endef
+
+AuConfAll = BRANCH_MAX_127 BRANCH_MAX_511 BRANCH_MAX_1023 BRANCH_MAX_32767 \
+ SBILIST \
+ HNOTIFY HFSNOTIFY \
+ EXPORT INO_T_64 \
+ XATTR \
+ FHSM \
+ RDU \
+ DIRREN \
+ SHWH \
+ BR_RAMFS \
+ BR_FUSE POLL \
+ BR_HFSPLUS \
+ BDEV_LOOP \
+ DEBUG MAGIC_SYSRQ
+$(foreach i, ${AuConfAll}, \
+ $(eval $(call AuConf,CONFIG_AUFS_${i})))
+
+AuConfName = ${obj}/conf.str
+${AuConfName}.tmp: FORCE
+ @echo ${AuConfStr} | tr ' ' '\n' | sed -e 's/^/"/' -e 's/$$/\\n"/' > $@
+${AuConfName}: ${AuConfName}.tmp
+ @diff -q $< $@ > /dev/null 2>&1 || { \
+ echo ' GEN ' $@; \
+ cp -p $< $@; \
+ }
+FORCE:
+clean-files += ${AuConfName} ${AuConfName}.tmp
+${obj}/sysfs.o: ${AuConfName}
+
+-include ${srctree}/${src}/conf_priv.mk
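conf.mk above records which CONFIG_AUFS_* symbols the module was built with: every enabled symbol is rewritten by sed into a quoted, newline-terminated C string literal in conf.str, and sysfs.o is made to depend on that file, presumably so the build configuration can be reported through sysfs. Since adjacent string literals concatenate in C, a consumer only has to include the generated file inside an initializer; the fragment below is an illustration with example contents, not the actual aufs code.

/* conf.str would contain lines such as
 *   "CONFIG_AUFS_FS=m\n"
 *   "CONFIG_AUFS_BRANCH_MAX_127=y\n"
 * which concatenate into a single string when included like this:
 */
static const char au_config_example[] =
	"CONFIG_AUFS_FS=m\n"
	"CONFIG_AUFS_BRANCH_MAX_127=y\n";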
diff --git a/fs/aufs/cpup.c b/fs/aufs/cpup.c
new file mode 100644
index 000000000000..21134b448967
--- /dev/null
+++ b/fs/aufs/cpup.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * copy-up functions, see wbr_policy.c for copy-down
+ */
+
+#include <linux/fs_stack.h>
+#include <linux/mm.h>
+#include <linux/task_work.h>
+#include "aufs.h"
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags)
+{
+ const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE
+ | S_NOATIME | S_NOCMTIME | S_AUTOMOUNT;
+
+ BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags));
+
+ dst->i_flags |= iflags & ~mask;
+ if (au_test_fs_notime(dst->i_sb))
+ dst->i_flags |= S_NOATIME | S_NOCMTIME;
+}
+
+void au_cpup_attr_timesizes(struct inode *inode)
+{
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, au_ibtop(inode));
+ fsstack_copy_attr_times(inode, h_inode);
+ fsstack_copy_inode_size(inode, h_inode);
+}
+
+void au_cpup_attr_nlink(struct inode *inode, int force)
+{
+ struct inode *h_inode;
+ struct super_block *sb;
+ aufs_bindex_t bindex, bbot;
+
+ sb = inode->i_sb;
+ bindex = au_ibtop(inode);
+ h_inode = au_h_iptr(inode, bindex);
+ if (!force
+ && !S_ISDIR(h_inode->i_mode)
+ && au_opt_test(au_mntflags(sb), PLINK)
+ && au_plink_test(inode))
+ return;
+
+ /*
+ * 0 can happen in revalidating.
+ * h_inode->i_mutex may not be held here, but it is harmless since once
+	 * i_nlink reaches 0, it will never become positive again except in the
+	 * O_TMPFILE case.
+	 * todo: O_TMPFILE+linkat(AT_SYMLINK_FOLLOW) bypassing aufs may cause
+	 * an incorrect link count.
+ */
+ set_nlink(inode, h_inode->i_nlink);
+
+ /*
+	 * a smaller nlink makes find(1) noisy, but a larger one doesn't.
+	 * it may include the whplink directory.
+ */
+ if (S_ISDIR(h_inode->i_mode)) {
+ bbot = au_ibbot(inode);
+ for (bindex++; bindex <= bbot; bindex++) {
+ h_inode = au_h_iptr(inode, bindex);
+ if (h_inode)
+ au_add_nlink(inode, h_inode);
+ }
+ }
+}
+
+void au_cpup_attr_changeable(struct inode *inode)
+{
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, au_ibtop(inode));
+ inode->i_mode = h_inode->i_mode;
+ inode->i_uid = h_inode->i_uid;
+ inode->i_gid = h_inode->i_gid;
+ au_cpup_attr_timesizes(inode);
+ au_cpup_attr_flags(inode, h_inode->i_flags);
+}
+
+void au_cpup_igen(struct inode *inode, struct inode *h_inode)
+{
+ struct au_iinfo *iinfo = au_ii(inode);
+
+ IiMustWriteLock(inode);
+
+ iinfo->ii_higen = h_inode->i_generation;
+ iinfo->ii_hsb1 = h_inode->i_sb;
+}
+
+void au_cpup_attr_all(struct inode *inode, int force)
+{
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, au_ibtop(inode));
+ au_cpup_attr_changeable(inode);
+ if (inode->i_nlink > 0)
+ au_cpup_attr_nlink(inode, force);
+ inode->i_rdev = h_inode->i_rdev;
+ inode->i_blkbits = h_inode->i_blkbits;
+ au_cpup_igen(inode, h_inode);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Note: dt_dentry and dt_h_path.dentry are not dget/dput-ed */
+
+/* keep the timestamps of the parent dir during copy-up */
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+ struct path *h_path)
+{
+ struct inode *h_inode;
+
+ dt->dt_dentry = dentry;
+ dt->dt_h_path = *h_path;
+ h_inode = d_inode(h_path->dentry);
+ dt->dt_atime = h_inode->i_atime;
+ dt->dt_mtime = h_inode->i_mtime;
+ /* smp_mb(); */
+}
+
+void au_dtime_revert(struct au_dtime *dt)
+{
+ struct iattr attr;
+ int err;
+
+ attr.ia_atime = dt->dt_atime;
+ attr.ia_mtime = dt->dt_mtime;
+ attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET
+ | ATTR_ATIME | ATTR_ATIME_SET;
+
+ /* no delegation since this is a directory */
+ err = vfsub_notify_change(&dt->dt_h_path, &attr, /*delegated*/NULL);
+ if (unlikely(err))
+ pr_warn("restoring timestamps failed(%d). ignored\n", err);
+}
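+
+/*
+ * usage sketch (illustrative; names are placeholders, cf. cpup_entry() and
+ * au_cpup_wh() below): the two helpers above are paired around an operation
+ * on the lower branch that would otherwise update the parent dir's
+ * timestamps,
+ *	au_dtime_store(&dt, parent, &h_parent_path);
+ *	... vfsub_create()/vfsub_link()/vfsub_unlink() ...
+ *	au_dtime_revert(&dt);
+ */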
+
+/* ---------------------------------------------------------------------- */
+
+/* internal use only */
+struct au_cpup_reg_attr {
+ int valid;
+ struct kstat st;
+ unsigned int iflags; /* inode->i_flags */
+};
+
+static noinline_for_stack
+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src,
+ struct au_cpup_reg_attr *h_src_attr)
+{
+ int err, sbits, icex;
+ unsigned int mnt_flags;
+ unsigned char verbose;
+ struct iattr ia;
+ struct path h_path;
+ struct inode *h_isrc, *h_idst;
+ struct kstat *h_st;
+ struct au_branch *br;
+
+ h_path.dentry = au_h_dptr(dst, bindex);
+ h_idst = d_inode(h_path.dentry);
+ br = au_sbr(dst->d_sb, bindex);
+ h_path.mnt = au_br_mnt(br);
+ h_isrc = d_inode(h_src);
+ ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID
+ | ATTR_ATIME | ATTR_MTIME
+ | ATTR_ATIME_SET | ATTR_MTIME_SET;
+ if (h_src_attr && h_src_attr->valid) {
+ h_st = &h_src_attr->st;
+ ia.ia_uid = h_st->uid;
+ ia.ia_gid = h_st->gid;
+ ia.ia_atime = h_st->atime;
+ ia.ia_mtime = h_st->mtime;
+ if (h_idst->i_mode != h_st->mode
+ && !S_ISLNK(h_idst->i_mode)) {
+ ia.ia_valid |= ATTR_MODE;
+ ia.ia_mode = h_st->mode;
+ }
+ sbits = !!(h_st->mode & (S_ISUID | S_ISGID));
+ au_cpup_attr_flags(h_idst, h_src_attr->iflags);
+ } else {
+ ia.ia_uid = h_isrc->i_uid;
+ ia.ia_gid = h_isrc->i_gid;
+ ia.ia_atime = h_isrc->i_atime;
+ ia.ia_mtime = h_isrc->i_mtime;
+ if (h_idst->i_mode != h_isrc->i_mode
+ && !S_ISLNK(h_idst->i_mode)) {
+ ia.ia_valid |= ATTR_MODE;
+ ia.ia_mode = h_isrc->i_mode;
+ }
+ sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID));
+ au_cpup_attr_flags(h_idst, h_isrc->i_flags);
+ }
+ /* no delegation since it is just created */
+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
+
+ /* is this nfs only? */
+ if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) {
+ ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+ ia.ia_mode = h_isrc->i_mode;
+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
+ }
+
+ icex = br->br_perm & AuBrAttr_ICEX;
+ if (!err) {
+ mnt_flags = au_mntflags(dst->d_sb);
+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
+ err = au_cpup_xattr(h_path.dentry, h_src, icex, verbose);
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
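+/*
+ * Overview of the loop below: the source is read in blksize chunks; a
+ * block-sized chunk of all zero bytes is not written but skipped over with
+ * llseek(SEEK_CUR), so the destination keeps a hole there.  A trailing hole
+ * is preserved by truncating the destination to the final offset via
+ * vfsub_notify_change(ATTR_SIZE | ATTR_FILE); on NFS a single NUL byte is
+ * written first (and the position stepped back) to force that last hole.
+ * The scratch buffer doubles as the struct iattr for the truncation to save
+ * stack.
+ */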
+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
+ char *buf, unsigned long blksize)
+{
+ int err;
+ size_t sz, rbytes, wbytes;
+ unsigned char all_zero;
+ char *p, *zp;
+ struct inode *h_inode;
+ /* reduce stack usage */
+ struct iattr *ia;
+
+ zp = page_address(ZERO_PAGE(0));
+ if (unlikely(!zp))
+ return -ENOMEM; /* possible? */
+
+ err = 0;
+ all_zero = 0;
+ while (len) {
+ AuDbg("len %lld\n", len);
+ sz = blksize;
+ if (len < blksize)
+ sz = len;
+
+ rbytes = 0;
+ /* todo: signal_pending? */
+ while (!rbytes || err == -EAGAIN || err == -EINTR) {
+ rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
+ err = rbytes;
+ }
+ if (unlikely(err < 0))
+ break;
+
+ all_zero = 0;
+ if (len >= rbytes && rbytes == blksize)
+ all_zero = !memcmp(buf, zp, rbytes);
+ if (!all_zero) {
+ wbytes = rbytes;
+ p = buf;
+ while (wbytes) {
+ size_t b;
+
+ b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
+ err = b;
+ /* todo: signal_pending? */
+ if (unlikely(err == -EAGAIN || err == -EINTR))
+ continue;
+ if (unlikely(err < 0))
+ break;
+ wbytes -= b;
+ p += b;
+ }
+ if (unlikely(err < 0))
+ break;
+ } else {
+ loff_t res;
+
+ AuLabel(hole);
+ res = vfsub_llseek(dst, rbytes, SEEK_CUR);
+ err = res;
+ if (unlikely(res < 0))
+ break;
+ }
+ len -= rbytes;
+ err = 0;
+ }
+
+ /* the last block may be a hole */
+ if (!err && all_zero) {
+ AuLabel(last hole);
+
+ err = 1;
+ if (au_test_nfs(dst->f_path.dentry->d_sb)) {
+ /* nfs requires this step to make last hole */
+ /* is this only nfs? */
+ do {
+ /* todo: signal_pending? */
+ err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
+ } while (err == -EAGAIN || err == -EINTR);
+ if (err == 1)
+ dst->f_pos--;
+ }
+
+ if (err == 1) {
+ ia = (void *)buf;
+ ia->ia_size = dst->f_pos;
+ ia->ia_valid = ATTR_SIZE | ATTR_FILE;
+ ia->ia_file = dst;
+ h_inode = file_inode(dst);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD2);
+ /* no delegation since it is just created */
+ err = vfsub_notify_change(&dst->f_path, ia,
+ /*delegated*/NULL);
+ inode_unlock(h_inode);
+ }
+ }
+
+ return err;
+}
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len)
+{
+ int err;
+ unsigned long blksize;
+ unsigned char do_kfree;
+ char *buf;
+ struct super_block *h_sb;
+
+ err = -ENOMEM;
+ h_sb = file_inode(dst)->i_sb;
+ blksize = h_sb->s_blocksize;
+ if (!blksize || PAGE_SIZE < blksize)
+ blksize = PAGE_SIZE;
+ AuDbg("blksize %lu\n", blksize);
+ do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *));
+ if (do_kfree)
+ buf = kmalloc(blksize, GFP_NOFS);
+ else
+ buf = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!buf))
+ goto out;
+
+ if (len > (1 << 22))
+ AuDbg("copying a large file %lld\n", (long long)len);
+
+ src->f_pos = 0;
+ dst->f_pos = 0;
+ err = au_do_copy_file(dst, src, len, buf, blksize);
+ if (do_kfree) {
+ AuDebugOn(!au_kfree_do_sz_test(blksize));
+ au_kfree_do_rcu(buf);
+ } else
+ free_page((unsigned long)buf);
+
+out:
+ return err;
+}
+
+static int au_do_copy(struct file *dst, struct file *src, loff_t len)
+{
+ int err;
+ struct super_block *h_src_sb;
+ struct inode *h_src_inode;
+
+ h_src_inode = file_inode(src);
+ h_src_sb = h_src_inode->i_sb;
+
+ /* XFS acquires inode_lock */
+ if (!au_test_xfs(h_src_sb))
+ err = au_copy_file(dst, src, len);
+ else {
+ inode_unlock_shared(h_src_inode);
+ err = au_copy_file(dst, src, len);
+ inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+ }
+
+ return err;
+}
+
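+/*
+ * try cloning (->remap_file_range) first when src and dst live on the same
+ * fs; on -EOPNOTSUPP, or when the clone comes up short, fall back to the
+ * plain read/write copy above.
+ */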
+static int au_clone_or_copy(struct file *dst, struct file *src, loff_t len)
+{
+ int err;
+ loff_t lo;
+ struct super_block *h_src_sb;
+ struct inode *h_src_inode;
+
+ h_src_inode = file_inode(src);
+ h_src_sb = h_src_inode->i_sb;
+ if (h_src_sb != file_inode(dst)->i_sb
+ || !dst->f_op->remap_file_range) {
+ err = au_do_copy(dst, src, len);
+ goto out;
+ }
+
+ if (!au_test_nfs(h_src_sb)) {
+ inode_unlock_shared(h_src_inode);
+ lo = vfsub_clone_file_range(src, dst, len);
+ inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+ } else
+ lo = vfsub_clone_file_range(src, dst, len);
+ if (lo == len) {
+ err = 0;
+ goto out; /* success */
+ } else if (lo >= 0)
+ /* todo: possible? */
+		/* partially succeeded */
+ AuDbg("lo %lld, len %lld. Retrying.\n", lo, len);
+ else if (lo != -EOPNOTSUPP) {
+ /* older XFS has a condition in cloning */
+ err = lo;
+ goto out;
+ }
+
+ /* the backend fs on NFS may not support cloning */
+ err = au_do_copy(dst, src, len);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * to support a sparse file which is opened with O_APPEND,
+ * we need to close the file.
+ */
+static int au_cp_regular(struct au_cp_generic *cpg)
+{
+ int err, i;
+ enum { SRC, DST };
+ struct {
+ aufs_bindex_t bindex;
+ unsigned int flags;
+ struct dentry *dentry;
+ int force_wr;
+ struct file *file;
+ } *f, file[] = {
+ {
+ .bindex = cpg->bsrc,
+ .flags = O_RDONLY | O_NOATIME | O_LARGEFILE,
+ },
+ {
+ .bindex = cpg->bdst,
+ .flags = O_WRONLY | O_NOATIME | O_LARGEFILE,
+ .force_wr = !!au_ftest_cpup(cpg->flags, RWDST),
+ }
+ };
+ struct au_branch *br;
+ struct super_block *sb, *h_src_sb;
+ struct inode *h_src_inode;
+ struct task_struct *tsk = current;
+
+ /* bsrc branch can be ro/rw. */
+ sb = cpg->dentry->d_sb;
+ f = file;
+ for (i = 0; i < 2; i++, f++) {
+ f->dentry = au_h_dptr(cpg->dentry, f->bindex);
+ f->file = au_h_open(cpg->dentry, f->bindex, f->flags,
+ /*file*/NULL, f->force_wr);
+ if (IS_ERR(f->file)) {
+ err = PTR_ERR(f->file);
+ if (i == SRC)
+ goto out;
+ else
+ goto out_src;
+ }
+ }
+
+	/* try to prevent updates while we copy up */
+ h_src_inode = d_inode(file[SRC].dentry);
+ h_src_sb = h_src_inode->i_sb;
+ if (!au_test_nfs(h_src_sb))
+ IMustLock(h_src_inode);
+ err = au_clone_or_copy(file[DST].file, file[SRC].file, cpg->len);
+
+	/* I wonder if we had an O_NO_DELAY_FPUT flag */
+ if (tsk->flags & PF_KTHREAD)
+ __fput_sync(file[DST].file);
+ else {
+ /* it happened actually */
+ fput(file[DST].file);
+ /*
+ * too bad.
+ * we have to call both since we don't know which place the file
+ * was added to.
+ */
+ task_work_run();
+ flush_delayed_fput();
+ }
+ br = au_sbr(sb, file[DST].bindex);
+ au_lcnt_dec(&br->br_nfiles);
+
+out_src:
+ fput(file[SRC].file);
+ br = au_sbr(sb, file[SRC].bindex);
+ au_lcnt_dec(&br->br_nfiles);
+out:
+ return err;
+}
+
+static int au_do_cpup_regular(struct au_cp_generic *cpg,
+ struct au_cpup_reg_attr *h_src_attr)
+{
+ int err, rerr;
+ loff_t l;
+ struct path h_path;
+ struct inode *h_src_inode, *h_dst_inode;
+
+ err = 0;
+ h_src_inode = au_h_iptr(d_inode(cpg->dentry), cpg->bsrc);
+ l = i_size_read(h_src_inode);
+ if (cpg->len == -1 || l < cpg->len)
+ cpg->len = l;
+ if (cpg->len) {
+		/* try to prevent updates while we are referencing it */
+ inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+ au_pin_hdir_unlock(cpg->pin);
+
+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+ h_path.mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc);
+ h_src_attr->iflags = h_src_inode->i_flags;
+ if (!au_test_nfs(h_src_inode->i_sb))
+ err = vfsub_getattr(&h_path, &h_src_attr->st);
+ else {
+ inode_unlock_shared(h_src_inode);
+ err = vfsub_getattr(&h_path, &h_src_attr->st);
+ inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+ }
+ if (unlikely(err)) {
+ inode_unlock_shared(h_src_inode);
+ goto out;
+ }
+ h_src_attr->valid = 1;
+ if (!au_test_nfs(h_src_inode->i_sb)) {
+ err = au_cp_regular(cpg);
+ inode_unlock_shared(h_src_inode);
+ } else {
+ inode_unlock_shared(h_src_inode);
+ err = au_cp_regular(cpg);
+ }
+ rerr = au_pin_hdir_relock(cpg->pin);
+ if (!err && rerr)
+ err = rerr;
+ }
+ if (!err && (h_src_inode->i_state & I_LINKABLE)) {
+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bdst);
+ h_dst_inode = d_inode(h_path.dentry);
+ spin_lock(&h_dst_inode->i_lock);
+ h_dst_inode->i_state |= I_LINKABLE;
+ spin_unlock(&h_dst_inode->i_lock);
+ }
+
+out:
+ return err;
+}
+
+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src,
+ struct inode *h_dir)
+{
+ int err, symlen;
+ mm_segment_t old_fs;
+ union {
+ char *k;
+ char __user *u;
+ } sym;
+
+ err = -ENOMEM;
+ sym.k = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!sym.k))
+ goto out;
+
+ /* unnecessary to support mmap_sem since symlink is not mmap-able */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ symlen = vfs_readlink(h_src, sym.u, PATH_MAX);
+ err = symlen;
+ set_fs(old_fs);
+
+ if (symlen > 0) {
+ sym.k[symlen] = 0;
+ err = vfsub_symlink(h_dir, h_path, sym.k);
+ }
+ free_page((unsigned long)sym.k);
+
+out:
+ return err;
+}
+
+/*
+ * regardless of the 'acl' option, reset all ACLs.
+ * All ACLs will be copied up later from the original entry on the lower branch.
+ */
+static int au_reset_acl(struct inode *h_dir, struct path *h_path, umode_t mode)
+{
+ int err;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ h_dentry = h_path->dentry;
+ h_inode = d_inode(h_dentry);
+ /* forget_all_cached_acls(h_inode)); */
+ err = vfsub_removexattr(h_dentry, XATTR_NAME_POSIX_ACL_ACCESS);
+ AuTraceErr(err);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ if (!err)
+ err = vfsub_acl_chmod(h_inode, mode);
+
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_do_cpup_dir(struct au_cp_generic *cpg, struct dentry *dst_parent,
+ struct inode *h_dir, struct path *h_path)
+{
+ int err;
+ struct inode *dir, *inode;
+
+ err = vfsub_removexattr(h_path->dentry, XATTR_NAME_POSIX_ACL_DEFAULT);
+ AuTraceErr(err);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ if (unlikely(err))
+ goto out;
+
+ /*
+	 * strange behaviour from the user's point of view,
+	 * particularly in the setattr case
+ */
+ dir = d_inode(dst_parent);
+ if (au_ibtop(dir) == cpg->bdst)
+ au_cpup_attr_nlink(dir, /*force*/1);
+ inode = d_inode(cpg->dentry);
+ au_cpup_attr_nlink(inode, /*force*/1);
+
+out:
+ return err;
+}
+
+static noinline_for_stack
+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent,
+ struct au_cpup_reg_attr *h_src_attr)
+{
+ int err;
+ umode_t mode;
+ unsigned int mnt_flags;
+ unsigned char isdir, isreg, force;
+ const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME);
+ struct au_dtime dt;
+ struct path h_path;
+ struct dentry *h_src, *h_dst, *h_parent;
+ struct inode *h_inode, *h_dir;
+ struct super_block *sb;
+
+ /* bsrc branch can be ro/rw. */
+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+ h_inode = d_inode(h_src);
+ AuDebugOn(h_inode != au_h_iptr(d_inode(cpg->dentry), cpg->bsrc));
+
+	/* try to prevent being referenced while we are creating */
+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+ if (au_ftest_cpup(cpg->flags, RENAME))
+ AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX,
+ AUFS_WH_PFX_LEN));
+ h_parent = h_dst->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+ AuDebugOn(h_parent != h_dst->d_parent);
+
+ sb = cpg->dentry->d_sb;
+ h_path.mnt = au_sbr_mnt(sb, cpg->bdst);
+ if (do_dt) {
+ h_path.dentry = h_parent;
+ au_dtime_store(&dt, dst_parent, &h_path);
+ }
+ h_path.dentry = h_dst;
+
+ isreg = 0;
+ isdir = 0;
+ mode = h_inode->i_mode;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ isreg = 1;
+ err = vfsub_create(h_dir, &h_path, 0600, /*want_excl*/true);
+ if (!err)
+ err = au_do_cpup_regular(cpg, h_src_attr);
+ break;
+ case S_IFDIR:
+ isdir = 1;
+ err = vfsub_mkdir(h_dir, &h_path, mode);
+ if (!err)
+ err = au_do_cpup_dir(cpg, dst_parent, h_dir, &h_path);
+ break;
+ case S_IFLNK:
+ err = au_do_cpup_symlink(&h_path, h_src, h_dir);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ AuDebugOn(!capable(CAP_MKNOD));
+ /*FALLTHROUGH*/
+ case S_IFIFO:
+ case S_IFSOCK:
+ err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev);
+ break;
+ default:
+ AuIOErr("Unknown inode type 0%o\n", mode);
+ err = -EIO;
+ }
+ if (!err)
+ err = au_reset_acl(h_dir, &h_path, mode);
+
+ mnt_flags = au_mntflags(sb);
+ if (!au_opt_test(mnt_flags, UDBA_NONE)
+ && !isdir
+ && au_opt_test(mnt_flags, XINO)
+ && (h_inode->i_nlink == 1
+ || (h_inode->i_state & I_LINKABLE))
+ /* todo: unnecessary? */
+ /* && d_inode(cpg->dentry)->i_nlink == 1 */
+ && cpg->bdst < cpg->bsrc
+ && !au_ftest_cpup(cpg->flags, KEEPLINO))
+ au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0);
+ /* ignore this error */
+
+ if (!err) {
+ force = 0;
+ if (isreg) {
+ force = !!cpg->len;
+ if (cpg->len == -1)
+ force = !!i_size_read(h_inode);
+ }
+ au_fhsm_wrote(sb, cpg->bdst, force);
+ }
+
+ if (do_dt)
+ au_dtime_revert(&dt);
+ return err;
+}
+
+static int au_do_ren_after_cpup(struct au_cp_generic *cpg, struct path *h_path)
+{
+ int err;
+ struct dentry *dentry, *h_dentry, *h_parent, *parent;
+ struct inode *h_dir;
+ aufs_bindex_t bdst;
+
+ dentry = cpg->dentry;
+ bdst = cpg->bdst;
+ h_dentry = au_h_dptr(dentry, bdst);
+ if (!au_ftest_cpup(cpg->flags, OVERWRITE)) {
+ dget(h_dentry);
+ au_set_h_dptr(dentry, bdst, NULL);
+ err = au_lkup_neg(dentry, bdst, /*wh*/0);
+ if (!err)
+ h_path->dentry = dget(au_h_dptr(dentry, bdst));
+ au_set_h_dptr(dentry, bdst, h_dentry);
+ } else {
+ err = 0;
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bdst);
+ dput(parent);
+ h_path->dentry = vfsub_lkup_one(&dentry->d_name, h_parent);
+ if (IS_ERR(h_path->dentry))
+ err = PTR_ERR(h_path->dentry);
+ }
+ if (unlikely(err))
+ goto out;
+
+ h_parent = h_dentry->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+ AuDbg("%pd %pd\n", h_dentry, h_path->dentry);
+ /* no delegation since it is just created */
+ err = vfsub_rename(h_dir, h_dentry, h_dir, h_path, /*delegated*/NULL,
+ /*flags*/0);
+ dput(h_path->dentry);
+
+out:
+ return err;
+}
+
+/*
+ * copy up the @dentry from @bsrc to @bdst.
+ * the caller must set both of the lower dentries.
+ * @len is for truncating; when it is -1, copy up the entire file.
+ * in link/rename cases, @dst_parent may be different from the real one.
+ * cpg->bsrc can be larger than cpg->bdst.
+ * aufs doesn't touch the credentials, so
+ * security_inode_copy_up{,_xattr}() are unnecessary.
+ */
+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+ int err, rerr;
+ aufs_bindex_t old_ibtop;
+ unsigned char isdir, plink;
+ struct dentry *h_src, *h_dst, *h_parent;
+ struct inode *dst_inode, *h_dir, *inode, *delegated, *src_inode;
+ struct super_block *sb;
+ struct au_branch *br;
+ /* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct path h_path;
+ struct au_cpup_reg_attr h_src_attr;
+ } *a;
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+ a->h_src_attr.valid = 0;
+
+ sb = cpg->dentry->d_sb;
+ br = au_sbr(sb, cpg->bdst);
+ a->h_path.mnt = au_br_mnt(br);
+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+ h_parent = h_dst->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+
+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+ inode = d_inode(cpg->dentry);
+
+ if (!dst_parent)
+ dst_parent = dget_parent(cpg->dentry);
+ else
+ dget(dst_parent);
+
+ plink = !!au_opt_test(au_mntflags(sb), PLINK);
+ dst_inode = au_h_iptr(inode, cpg->bdst);
+ if (dst_inode) {
+ if (unlikely(!plink)) {
+ err = -EIO;
+ AuIOErr("hi%lu(i%lu) exists on b%d "
+ "but plink is disabled\n",
+ dst_inode->i_ino, inode->i_ino, cpg->bdst);
+ goto out_parent;
+ }
+
+ if (dst_inode->i_nlink) {
+ const int do_dt = au_ftest_cpup(cpg->flags, DTIME);
+
+ h_src = au_plink_lkup(inode, cpg->bdst);
+ err = PTR_ERR(h_src);
+ if (IS_ERR(h_src))
+ goto out_parent;
+ if (unlikely(d_is_negative(h_src))) {
+ err = -EIO;
+ AuIOErr("i%lu exists on b%d "
+ "but not pseudo-linked\n",
+ inode->i_ino, cpg->bdst);
+ dput(h_src);
+ goto out_parent;
+ }
+
+ if (do_dt) {
+ a->h_path.dentry = h_parent;
+ au_dtime_store(&a->dt, dst_parent, &a->h_path);
+ }
+
+ a->h_path.dentry = h_dst;
+ delegated = NULL;
+ err = vfsub_link(h_src, h_dir, &a->h_path, &delegated);
+ if (!err && au_ftest_cpup(cpg->flags, RENAME))
+ err = au_do_ren_after_cpup(cpg, &a->h_path);
+ if (do_dt)
+ au_dtime_revert(&a->dt);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ dput(h_src);
+ goto out_parent;
+ } else
+ /* todo: cpup_wh_file? */
+ /* udba work */
+ au_update_ibrange(inode, /*do_put_zero*/1);
+ }
+
+ isdir = S_ISDIR(inode->i_mode);
+ old_ibtop = au_ibtop(inode);
+ err = cpup_entry(cpg, dst_parent, &a->h_src_attr);
+ if (unlikely(err))
+ goto out_rev;
+ dst_inode = d_inode(h_dst);
+ inode_lock_nested(dst_inode, AuLsc_I_CHILD2);
+ /* todo: necessary? */
+ /* au_pin_hdir_unlock(cpg->pin); */
+
+ err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr);
+ if (unlikely(err)) {
+ /* todo: necessary? */
+ /* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */
+ inode_unlock(dst_inode);
+ goto out_rev;
+ }
+
+ if (cpg->bdst < old_ibtop) {
+ if (S_ISREG(inode->i_mode)) {
+ err = au_dy_iaop(inode, cpg->bdst, dst_inode);
+ if (unlikely(err)) {
+ /* ignore an error */
+ /* au_pin_hdir_relock(cpg->pin); */
+ inode_unlock(dst_inode);
+ goto out_rev;
+ }
+ }
+ au_set_ibtop(inode, cpg->bdst);
+ } else
+ au_set_ibbot(inode, cpg->bdst);
+ au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode),
+ au_hi_flags(inode, isdir));
+
+ /* todo: necessary? */
+ /* err = au_pin_hdir_relock(cpg->pin); */
+ inode_unlock(dst_inode);
+ if (unlikely(err))
+ goto out_rev;
+
+ src_inode = d_inode(h_src);
+ if (!isdir
+ && (src_inode->i_nlink > 1
+ || src_inode->i_state & I_LINKABLE)
+ && plink)
+ au_plink_append(inode, cpg->bdst, h_dst);
+
+ if (au_ftest_cpup(cpg->flags, RENAME)) {
+ a->h_path.dentry = h_dst;
+ err = au_do_ren_after_cpup(cpg, &a->h_path);
+ }
+ if (!err)
+ goto out_parent; /* success */
+
+ /* revert */
+out_rev:
+ a->h_path.dentry = h_parent;
+ au_dtime_store(&a->dt, dst_parent, &a->h_path);
+ a->h_path.dentry = h_dst;
+ rerr = 0;
+ if (d_is_positive(h_dst)) {
+ if (!isdir) {
+ /* no delegation since it is just created */
+ rerr = vfsub_unlink(h_dir, &a->h_path,
+ /*delegated*/NULL, /*force*/0);
+ } else
+ rerr = vfsub_rmdir(h_dir, &a->h_path);
+ }
+ au_dtime_revert(&a->dt);
+ if (rerr) {
+ AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr);
+ err = -EIO;
+ }
+out_parent:
+ dput(dst_parent);
+ au_kfree_rcu(a);
+out:
+ return err;
+}
+
+#if 0 /* reserved */
+struct au_cpup_single_args {
+ int *errp;
+ struct au_cp_generic *cpg;
+ struct dentry *dst_parent;
+};
+
+static void au_call_cpup_single(void *args)
+{
+ struct au_cpup_single_args *a = args;
+
+ au_pin_hdir_acquire_nest(a->cpg->pin);
+ *a->errp = au_cpup_single(a->cpg, a->dst_parent);
+ au_pin_hdir_release(a->cpg->pin);
+}
+#endif
+
+/*
+ * prevent SIGXFSZ in copy-up.
+ * testing CAP_MKNOD is for generic fs,
+ * but CAP_FSETID is for xfs only, currently.
+ */
+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode)
+{
+ int do_sio;
+ struct super_block *sb;
+ struct inode *h_dir;
+
+ do_sio = 0;
+ sb = au_pinned_parent(pin)->d_sb;
+ if (!au_wkq_test()
+ && (!au_sbi(sb)->si_plink_maint_pid
+ || au_plink_maint(sb, AuLock_NOPLM))) {
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ /* no condition about RLIMIT_FSIZE and the file size */
+ do_sio = 1;
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ do_sio = !capable(CAP_MKNOD);
+ break;
+ }
+ if (!do_sio)
+ do_sio = ((mode & (S_ISUID | S_ISGID))
+ && !capable(CAP_FSETID));
+ /* this workaround may be removed in the future */
+ if (!do_sio) {
+ h_dir = au_pinned_h_dir(pin);
+ do_sio = h_dir->i_mode & S_ISVTX;
+ }
+ }
+
+ return do_sio;
+}
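+
+/*
+ * note: when either au_test_h_perm_sio() or au_cpup_sio_test() says so, the
+ * callers below do not copy up directly but delegate the copy-up to the aufs
+ * workqueue via au_wkq_wait(), i.e. run it in a kernel thread instead of the
+ * calling task.
+ */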
+
+#if 0 /* reserved */
+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+ int err, wkq_err;
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+ if (!au_cpup_sio_test(pin, d_inode(h_dentry)->i_mode))
+ err = au_cpup_single(cpg, dst_parent);
+ else {
+ struct au_cpup_single_args args = {
+ .errp = &err,
+ .cpg = cpg,
+ .dst_parent = dst_parent
+ };
+ wkq_err = au_wkq_wait(au_call_cpup_single, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
+#endif
+
+/*
+ * copyup the @dentry from the first active lower branch to @bdst,
+ * using au_cpup_single().
+ */
+static int au_cpup_simple(struct au_cp_generic *cpg)
+{
+ int err;
+ unsigned int flags_orig;
+ struct dentry *dentry;
+
+ AuDebugOn(cpg->bsrc < 0);
+
+ dentry = cpg->dentry;
+ DiMustWriteLock(dentry);
+
+ err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1);
+ if (!err) {
+ flags_orig = cpg->flags;
+ au_fset_cpup(cpg->flags, RENAME);
+ err = au_cpup_single(cpg, NULL);
+ cpg->flags = flags_orig;
+ if (!err)
+ return 0; /* success */
+
+ /* revert */
+ au_set_h_dptr(dentry, cpg->bdst, NULL);
+ au_set_dbtop(dentry, cpg->bsrc);
+ }
+
+ return err;
+}
+
+struct au_cpup_simple_args {
+ int *errp;
+ struct au_cp_generic *cpg;
+};
+
+static void au_call_cpup_simple(void *args)
+{
+ struct au_cpup_simple_args *a = args;
+
+ au_pin_hdir_acquire_nest(a->cpg->pin);
+ *a->errp = au_cpup_simple(a->cpg);
+ au_pin_hdir_release(a->cpg->pin);
+}
+
+static int au_do_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+ int err, wkq_err;
+ struct dentry *dentry, *parent;
+ struct file *h_file;
+ struct inode *h_dir;
+
+ dentry = cpg->dentry;
+ h_file = NULL;
+ if (au_ftest_cpup(cpg->flags, HOPEN)) {
+ AuDebugOn(cpg->bsrc < 0);
+ h_file = au_h_open_pre(dentry, cpg->bsrc, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+ }
+
+ parent = dget_parent(dentry);
+ h_dir = au_h_iptr(d_inode(parent), cpg->bdst);
+ if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE)
+ && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
+ err = au_cpup_simple(cpg);
+ else {
+ struct au_cpup_simple_args args = {
+ .errp = &err,
+ .cpg = cpg
+ };
+ wkq_err = au_wkq_wait(au_call_cpup_simple, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ dput(parent);
+ if (h_file)
+ au_h_open_post(dentry, cpg->bsrc, h_file);
+
+out:
+ return err;
+}
+
+int au_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+ aufs_bindex_t bsrc, bbot;
+ struct dentry *dentry, *h_dentry;
+
+ if (cpg->bsrc < 0) {
+ dentry = cpg->dentry;
+ bbot = au_dbbot(dentry);
+ for (bsrc = cpg->bdst + 1; bsrc <= bbot; bsrc++) {
+ h_dentry = au_h_dptr(dentry, bsrc);
+ if (h_dentry) {
+ AuDebugOn(d_is_negative(h_dentry));
+ break;
+ }
+ }
+ AuDebugOn(bsrc > bbot);
+ cpg->bsrc = bsrc;
+ }
+ AuDebugOn(cpg->bsrc <= cpg->bdst);
+ return au_do_sio_cpup_simple(cpg);
+}
+
+int au_sio_cpdown_simple(struct au_cp_generic *cpg)
+{
+ AuDebugOn(cpg->bdst <= cpg->bsrc);
+ return au_do_sio_cpup_simple(cpg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * copyup the deleted file for writing.
+ */
+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry,
+ struct file *file)
+{
+ int err;
+ unsigned int flags_orig;
+ aufs_bindex_t bsrc_orig;
+ struct au_dinfo *dinfo;
+ struct {
+ struct au_hdentry *hd;
+ struct dentry *h_dentry;
+ } hdst, hsrc;
+
+ dinfo = au_di(cpg->dentry);
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ bsrc_orig = cpg->bsrc;
+ cpg->bsrc = dinfo->di_btop;
+ hdst.hd = au_hdentry(dinfo, cpg->bdst);
+ hdst.h_dentry = hdst.hd->hd_dentry;
+ hdst.hd->hd_dentry = wh_dentry;
+ dinfo->di_btop = cpg->bdst;
+
+ hsrc.h_dentry = NULL;
+ if (file) {
+ hsrc.hd = au_hdentry(dinfo, cpg->bsrc);
+ hsrc.h_dentry = hsrc.hd->hd_dentry;
+ hsrc.hd->hd_dentry = au_hf_top(file)->f_path.dentry;
+ }
+ flags_orig = cpg->flags;
+ cpg->flags = !AuCpup_DTIME;
+ err = au_cpup_single(cpg, /*h_parent*/NULL);
+ cpg->flags = flags_orig;
+ if (file) {
+ if (!err)
+ err = au_reopen_nondir(file);
+ hsrc.hd->hd_dentry = hsrc.h_dentry;
+ }
+ hdst.hd->hd_dentry = hdst.h_dentry;
+ dinfo->di_btop = cpg->bsrc;
+ cpg->bsrc = bsrc_orig;
+
+ return err;
+}
+
+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+ int err;
+ aufs_bindex_t bdst;
+ struct au_dtime dt;
+ struct dentry *dentry, *parent, *h_parent, *wh_dentry;
+ struct au_branch *br;
+ struct path h_path;
+
+ dentry = cpg->dentry;
+ bdst = cpg->bdst;
+ br = au_sbr(dentry->d_sb, bdst);
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bdst);
+ wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out;
+
+ h_path.dentry = h_parent;
+ h_path.mnt = au_br_mnt(br);
+ au_dtime_store(&dt, parent, &h_path);
+ err = au_do_cpup_wh(cpg, wh_dentry, file);
+ if (unlikely(err))
+ goto out_wh;
+
+ dget(wh_dentry);
+ h_path.dentry = wh_dentry;
+ if (!d_is_dir(wh_dentry)) {
+ /* no delegation since it is just created */
+ err = vfsub_unlink(d_inode(h_parent), &h_path,
+ /*delegated*/NULL, /*force*/0);
+ } else
+ err = vfsub_rmdir(d_inode(h_parent), &h_path);
+ if (unlikely(err)) {
+ AuIOErr("failed remove copied-up tmp file %pd(%d)\n",
+ wh_dentry, err);
+ err = -EIO;
+ }
+ au_dtime_revert(&dt);
+ au_set_hi_wh(d_inode(dentry), bdst, wh_dentry);
+
+out_wh:
+ dput(wh_dentry);
+out:
+ dput(parent);
+ return err;
+}
+
+struct au_cpup_wh_args {
+ int *errp;
+ struct au_cp_generic *cpg;
+ struct file *file;
+};
+
+static void au_call_cpup_wh(void *args)
+{
+ struct au_cpup_wh_args *a = args;
+
+ au_pin_hdir_acquire_nest(a->cpg->pin);
+ *a->errp = au_cpup_wh(a->cpg, a->file);
+ au_pin_hdir_release(a->cpg->pin);
+}
+
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+ int err, wkq_err;
+ aufs_bindex_t bdst;
+ struct dentry *dentry, *parent, *h_orph, *h_parent;
+ struct inode *dir, *h_dir, *h_tmpdir;
+ struct au_wbr *wbr;
+ struct au_pin wh_pin, *pin_orig;
+
+ dentry = cpg->dentry;
+ bdst = cpg->bdst;
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ h_orph = NULL;
+ h_parent = NULL;
+ h_dir = au_igrab(au_h_iptr(dir, bdst));
+ h_tmpdir = h_dir;
+ pin_orig = NULL;
+ if (!h_dir->i_nlink) {
+ wbr = au_sbr(dentry->d_sb, bdst)->br_wbr;
+ h_orph = wbr->wbr_orph;
+
+ h_parent = dget(au_h_dptr(parent, bdst));
+ au_set_h_dptr(parent, bdst, dget(h_orph));
+ h_tmpdir = d_inode(h_orph);
+ au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0);
+
+ inode_lock_nested(h_tmpdir, AuLsc_I_PARENT3);
+ /* todo: au_h_open_pre()? */
+
+ pin_orig = cpg->pin;
+ au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT,
+ AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED);
+ cpg->pin = &wh_pin;
+ }
+
+ if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE)
+ && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
+ err = au_cpup_wh(cpg, file);
+ else {
+ struct au_cpup_wh_args args = {
+ .errp = &err,
+ .cpg = cpg,
+ .file = file
+ };
+ wkq_err = au_wkq_wait(au_call_cpup_wh, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ if (h_orph) {
+ inode_unlock(h_tmpdir);
+ /* todo: au_h_open_post()? */
+ au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0);
+ au_set_h_dptr(parent, bdst, h_parent);
+ AuDebugOn(!pin_orig);
+ cpg->pin = pin_orig;
+ }
+ iput(h_dir);
+ dput(parent);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generic routine for both copy-up and copy-down.
+ */
+/* cf. revalidate function in file.c */
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent, void *arg),
+ void *arg)
+{
+ int err;
+ struct au_pin pin;
+ struct dentry *d, *parent, *h_parent, *real_parent, *h_dentry;
+
+ err = 0;
+ parent = dget_parent(dentry);
+ if (IS_ROOT(parent))
+ goto out;
+
+ au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2,
+ au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE);
+
+ /* do not use au_dpage */
+ real_parent = parent;
+ while (1) {
+ dput(parent);
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bdst);
+ if (h_parent)
+ goto out; /* success */
+
+		/* find the topmost dir which needs to be copied up */
+ do {
+ d = parent;
+ dput(parent);
+ parent = dget_parent(d);
+ di_read_lock_parent3(parent, !AuLock_IR);
+ h_parent = au_h_dptr(parent, bdst);
+ di_read_unlock(parent, !AuLock_IR);
+ } while (!h_parent);
+
+ if (d != real_parent)
+ di_write_lock_child3(d);
+
+ /* somebody else might create while we were sleeping */
+ h_dentry = au_h_dptr(d, bdst);
+ if (!h_dentry || d_is_negative(h_dentry)) {
+ if (h_dentry)
+ au_update_dbtop(d);
+
+ au_pin_set_dentry(&pin, d);
+ err = au_do_pin(&pin);
+ if (!err) {
+ err = cp(d, bdst, &pin, h_parent, arg);
+ au_unpin(&pin);
+ }
+ }
+
+ if (d != real_parent)
+ di_write_unlock(d);
+ if (unlikely(err))
+ break;
+ }
+
+out:
+ dput(parent);
+ return err;
+}
+
+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent __maybe_unused,
+ void *arg __maybe_unused)
+{
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = bdst,
+ .bsrc = -1,
+ .len = 0,
+ .pin = pin,
+ .flags = AuCpup_DTIME
+ };
+ return au_sio_cpup_simple(&cpg);
+}
+
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+ return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL);
+}
+
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+ int err;
+ struct dentry *parent;
+ struct inode *dir;
+
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ err = 0;
+ if (au_h_iptr(dir, bdst))
+ goto out;
+
+ di_read_unlock(parent, AuLock_IR);
+ di_write_lock_parent(parent);
+ /* someone else might change our inode while we were sleeping */
+ if (!au_h_iptr(dir, bdst))
+ err = au_cpup_dirs(dentry, bdst);
+ di_downgrade_lock(parent, AuLock_IR);
+
+out:
+ dput(parent);
+ return err;
+}
diff --git a/fs/aufs/cpup.h b/fs/aufs/cpup.h
new file mode 100644
index 000000000000..abcbfbc55a5c
--- /dev/null
+++ b/fs/aufs/cpup.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * copy-up/down functions
+ */
+
+#ifndef __AUFS_CPUP_H__
+#define __AUFS_CPUP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct inode;
+struct file;
+struct au_pin;
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags);
+void au_cpup_attr_timesizes(struct inode *inode);
+void au_cpup_attr_nlink(struct inode *inode, int force);
+void au_cpup_attr_changeable(struct inode *inode);
+void au_cpup_igen(struct inode *inode, struct inode *h_inode);
+void au_cpup_attr_all(struct inode *inode, int force);
+
+/* ---------------------------------------------------------------------- */
+
+struct au_cp_generic {
+ struct dentry *dentry;
+ aufs_bindex_t bdst, bsrc;
+ loff_t len;
+ struct au_pin *pin;
+ unsigned int flags;
+};
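+
+/*
+ * typical setup (illustrative, mirroring au_cpup_dir() in cpup.c):
+ *	struct au_cp_generic cpg = {
+ *		.dentry	= dentry,
+ *		.bdst	= bdst,
+ *		.bsrc	= -1,
+ *		.len	= 0,
+ *		.pin	= pin,
+ *		.flags	= AuCpup_DTIME
+ *	};
+ *	err = au_sio_cpup_simple(&cpg);
+ */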
+
+/* cpup flags */
+#define AuCpup_DTIME 1 /* do dtime_store/revert */
+#define AuCpup_KEEPLINO (1 << 1) /* do not clear the lower xino,
+ for link(2) */
+#define AuCpup_RENAME (1 << 2) /* rename after cpup */
+#define AuCpup_HOPEN (1 << 3) /* call h_open_pre/post() in
+ cpup */
+#define AuCpup_OVERWRITE (1 << 4) /* allow overwriting the
+ existing entry */
+#define AuCpup_RWDST (1 << 5) /* force write target even if
+ the branch is marked as RO */
+
+#ifndef CONFIG_AUFS_BR_HFSPLUS
+#undef AuCpup_HOPEN
+#define AuCpup_HOPEN 0
+#endif
+
+#define au_ftest_cpup(flags, name) ((flags) & AuCpup_##name)
+#define au_fset_cpup(flags, name) \
+ do { (flags) |= AuCpup_##name; } while (0)
+#define au_fclr_cpup(flags, name) \
+ do { (flags) &= ~AuCpup_##name; } while (0)
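+
+/*
+ * e.g. (as used in cpup.c):
+ *	const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME);
+ *	au_fset_cpup(cpg->flags, RENAME);
+ */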
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len);
+int au_sio_cpup_simple(struct au_cp_generic *cpg);
+int au_sio_cpdown_simple(struct au_cp_generic *cpg);
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file);
+
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent, void *arg),
+ void *arg);
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
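+
+/*
+ * a cp() callback for au_cp_dirs() copies up one missing ancestor at a time;
+ * au_cpup_dir() in cpup.c is the minimal example, and au_cpup_dirs() is just
+ * au_cp_dirs() with that callback.
+ */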
+
+/* ---------------------------------------------------------------------- */
+
+/* keep the timestamps of the parent dir across copy-up */
+struct au_dtime {
+ struct dentry *dt_dentry;
+ struct path dt_h_path;
+ struct timespec64 dt_atime, dt_mtime;
+};
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+ struct path *h_path);
+void au_dtime_revert(struct au_dtime *dt);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_CPUP_H__ */
diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c
new file mode 100644
index 000000000000..e32d91d1f638
--- /dev/null
+++ b/fs/aufs/dbgaufs.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debugfs interface
+ */
+
+#include <linux/debugfs.h>
+#include "aufs.h"
+
+#ifndef CONFIG_SYSFS
+#error DEBUG_FS depends upon SYSFS
+#endif
+
+static struct dentry *dbgaufs;
+static const mode_t dbgaufs_mode = 0444;
+
+/* 20 is the maximum number of decimal digits of a 64-bit unsigned long */
+struct dbgaufs_arg {
+ int n;
+ char a[20 * 4];
+};
+
+/*
+ * common function for all XINO files
+ */
+static int dbgaufs_xi_release(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ void *p;
+
+ p = file->private_data;
+ if (p) {
+ /* this is struct dbgaufs_arg */
+ AuDebugOn(!au_kfree_sz_test(p));
+ au_kfree_do_rcu(p);
+ }
+ return 0;
+}
+
+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt,
+ int cnt)
+{
+ int err;
+ struct kstat st;
+ struct dbgaufs_arg *p;
+
+ err = -ENOMEM;
+ p = kmalloc(sizeof(*p), GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+
+ err = 0;
+ p->n = 0;
+ file->private_data = p;
+ if (!xf)
+ goto out;
+
+ err = vfsub_getattr(&xf->f_path, &st);
+ if (!err) {
+ if (do_fcnt)
+ p->n = snprintf
+ (p->a, sizeof(p->a), "%d, %llux%u %lld\n",
+ cnt, st.blocks, st.blksize,
+ (long long)st.size);
+ else
+ p->n = snprintf(p->a, sizeof(p->a), "%llux%u %lld\n",
+ st.blocks, st.blksize,
+ (long long)st.size);
+ AuDebugOn(p->n >= sizeof(p->a));
+ } else {
+ p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err);
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgaufs_arg *p;
+
+ p = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dbgaufs_plink_arg {
+ int n;
+ char a[];
+};
+
+static int dbgaufs_plink_release(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ free_page((unsigned long)file->private_data);
+ return 0;
+}
+
+static int dbgaufs_plink_open(struct inode *inode, struct file *file)
+{
+ int err, i, limit;
+ unsigned long n, sum;
+ struct dbgaufs_plink_arg *p;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+ struct hlist_bl_head *hbl;
+
+ err = -ENOMEM;
+ p = (void *)get_zeroed_page(GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+
+ err = -EFBIG;
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ if (au_opt_test(au_mntflags(sb), PLINK)) {
+ limit = PAGE_SIZE - sizeof(p->n);
+
+ /* the number of buckets */
+ n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH);
+ p->n += n;
+ limit -= n;
+
+ sum = 0;
+ for (i = 0, hbl = sbinfo->si_plink; i < AuPlink_NHASH;
+ i++, hbl++) {
+ n = au_hbl_count(hbl);
+ sum += n;
+
+ n = snprintf(p->a + p->n, limit, "%lu ", n);
+ p->n += n;
+ limit -= n;
+ if (unlikely(limit <= 0))
+ goto out_free;
+ }
+ p->a[p->n - 1] = '\n';
+
+ /* the sum of plinks */
+ n = snprintf(p->a + p->n, limit, "%lu\n", sum);
+ p->n += n;
+ limit -= n;
+ if (unlikely(limit <= 0))
+ goto out_free;
+ } else {
+#define str "1\n0\n0\n"
+ p->n = sizeof(str) - 1;
+ strcpy(p->a, str);
+#undef str
+ }
+ si_read_unlock(sb);
+
+ err = 0;
+ file->private_data = p;
+ goto out; /* success */
+
+out_free:
+ free_page((unsigned long)p);
+out:
+ return err;
+}
+
+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgaufs_plink_arg *p;
+
+ p = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+static const struct file_operations dbgaufs_plink_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_plink_open,
+ .release = dbgaufs_plink_release,
+ .read = dbgaufs_plink_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int dbgaufs_xib_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0, /*cnt*/0);
+ si_read_unlock(sb);
+ return err;
+}
+
+static const struct file_operations dbgaufs_xib_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_xib_open,
+ .release = dbgaufs_xi_release,
+ .read = dbgaufs_xi_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+#define DbgaufsXi_PREFIX "xi"
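+
+/*
+ * the per-branch debugfs entries are named "xi<bindex>", or
+ * "xi<bindex>-<idx>" when a branch has more than one xino file;
+ * dbgaufs_xino_open() below parses the name back into (bindex, idx).
+ */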
+
+static int dbgaufs_xino_open(struct inode *inode, struct file *file)
+{
+ int err, idx;
+ long l;
+ aufs_bindex_t bindex;
+ char *p, a[sizeof(DbgaufsXi_PREFIX) + 8];
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+ struct au_xino *xi;
+ struct file *xf;
+ struct qstr *name;
+ struct au_branch *br;
+
+ err = -ENOENT;
+ name = &file->f_path.dentry->d_name;
+ if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX)
+ || memcmp(name->name, DbgaufsXi_PREFIX,
+ sizeof(DbgaufsXi_PREFIX) - 1)))
+ goto out;
+
+ AuDebugOn(name->len >= sizeof(a));
+ memcpy(a, name->name, name->len);
+ a[name->len] = '\0';
+ p = strchr(a, '-');
+ if (p)
+ *p = '\0';
+ err = kstrtol(a + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l);
+ if (unlikely(err))
+ goto out;
+ bindex = l;
+ idx = 0;
+ if (p) {
+ err = kstrtol(p + 1, 10, &l);
+ if (unlikely(err))
+ goto out;
+ idx = l;
+ }
+
+ err = -ENOENT;
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ if (unlikely(bindex < 0 || bindex > au_sbbot(sb)))
+ goto out_si;
+ br = au_sbr(sb, bindex);
+ xi = br->br_xino;
+ if (unlikely(idx >= xi->xi_nfile))
+ goto out_si;
+ xf = au_xino_file(xi, idx);
+ if (xf)
+ err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1,
+ au_xino_count(br));
+
+out_si:
+ si_read_unlock(sb);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static const struct file_operations dbgaufs_xino_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_xino_open,
+ .release = dbgaufs_xi_release,
+ .read = dbgaufs_xi_read
+};
+
+void dbgaufs_xino_del(struct au_branch *br)
+{
+ struct dentry *dbgaufs;
+
+ dbgaufs = br->br_dbgaufs;
+ if (!dbgaufs)
+ return;
+
+ br->br_dbgaufs = NULL;
+ /* debugfs acquires the parent i_mutex */
+ lockdep_off();
+ debugfs_remove(dbgaufs);
+ lockdep_on();
+}
+
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+ aufs_bindex_t bbot;
+ struct au_branch *br;
+
+ if (!au_sbi(sb)->si_dbgaufs)
+ return;
+
+ bbot = au_sbbot(sb);
+ for (; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ dbgaufs_xino_del(br);
+ }
+}
+
+static void dbgaufs_br_do_add(struct super_block *sb, aufs_bindex_t bindex,
+ unsigned int idx, struct dentry *parent,
+ struct au_sbinfo *sbinfo)
+{
+ struct au_branch *br;
+ struct dentry *d;
+ /* "xi" bindex(5) "-" idx(2) NULL */
+ char name[sizeof(DbgaufsXi_PREFIX) + 8];
+
+ if (!idx)
+ snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex);
+ else
+ snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d-%u",
+ bindex, idx);
+ br = au_sbr(sb, bindex);
+ if (br->br_dbgaufs) {
+ struct qstr qstr = QSTR_INIT(name, strlen(name));
+
+ if (!au_qstreq(&br->br_dbgaufs->d_name, &qstr)) {
+ /* debugfs acquires the parent i_mutex */
+ lockdep_off();
+ d = debugfs_rename(parent, br->br_dbgaufs, parent,
+ name);
+ lockdep_on();
+ if (unlikely(!d))
+ pr_warn("failed renaming %pd/%s, ignored.\n",
+ parent, name);
+ }
+ } else {
+ lockdep_off();
+ br->br_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent,
+ sbinfo, &dbgaufs_xino_fop);
+ lockdep_on();
+ if (unlikely(!br->br_dbgaufs))
+ pr_warn("failed creating %pd/%s, ignored.\n",
+ parent, name);
+ }
+}
+
+static void dbgaufs_br_add(struct super_block *sb, aufs_bindex_t bindex,
+ struct dentry *parent, struct au_sbinfo *sbinfo)
+{
+ struct au_branch *br;
+ struct au_xino *xi;
+ unsigned int u;
+
+ br = au_sbr(sb, bindex);
+ xi = br->br_xino;
+ for (u = 0; u < xi->xi_nfile; u++)
+ dbgaufs_br_do_add(sb, bindex, u, parent, sbinfo);
+}
+
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex, int topdown)
+{
+ struct au_sbinfo *sbinfo;
+ struct dentry *parent;
+ aufs_bindex_t bbot;
+
+ if (!au_opt_test(au_mntflags(sb), XINO))
+ return;
+
+ sbinfo = au_sbi(sb);
+ parent = sbinfo->si_dbgaufs;
+ if (!parent)
+ return;
+
+ bbot = au_sbbot(sb);
+ if (topdown)
+ for (; bindex <= bbot; bindex++)
+ dbgaufs_br_add(sb, bindex, parent, sbinfo);
+ else
+ for (; bbot >= bindex; bbot--)
+ dbgaufs_br_add(sb, bbot, parent, sbinfo);
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+static int dbgaufs_xigen_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0, /*cnt*/0);
+ si_read_unlock(sb);
+ return err;
+}
+
+static const struct file_operations dbgaufs_xigen_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_xigen_open,
+ .release = dbgaufs_xi_release,
+ .read = dbgaufs_xi_read
+};
+
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+ int err;
+
+ /*
+	 * In effect, this is a dynamic '__init' function,
+ * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+ err = -EIO;
+ sbinfo->si_dbgaufs_xigen = debugfs_create_file
+ ("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+ &dbgaufs_xigen_fop);
+ if (sbinfo->si_dbgaufs_xigen)
+ err = 0;
+
+ return err;
+}
+#else
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+ return 0;
+}
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
+{
+ /*
+	 * In effect, this is a dynamic '__fin' function,
+ * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+ debugfs_remove_recursive(sbinfo->si_dbgaufs);
+ sbinfo->si_dbgaufs = NULL;
+}
+
+int dbgaufs_si_init(struct au_sbinfo *sbinfo)
+{
+ int err;
+ char name[SysaufsSiNameLen];
+
+ /*
+	 * In effect, this is a dynamic '__init' function,
+ * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+ err = -ENOENT;
+ if (!dbgaufs) {
+ AuErr1("/debug/aufs is uninitialized\n");
+ goto out;
+ }
+
+ err = -EIO;
+ sysaufs_name(sbinfo, name);
+ sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs);
+ if (unlikely(!sbinfo->si_dbgaufs))
+ goto out;
+
+	/* regardless of the plink/noplink option */
+ sbinfo->si_dbgaufs_plink = debugfs_create_file
+ ("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+ &dbgaufs_plink_fop);
+ if (unlikely(!sbinfo->si_dbgaufs_plink))
+ goto out_dir;
+
+	/* regardless of the xino/noxino option */
+ sbinfo->si_dbgaufs_xib = debugfs_create_file
+ ("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+ &dbgaufs_xib_fop);
+ if (unlikely(!sbinfo->si_dbgaufs_xib))
+ goto out_dir;
+
+ err = dbgaufs_xigen_init(sbinfo);
+ if (!err)
+ goto out; /* success */
+
+out_dir:
+ dbgaufs_si_fin(sbinfo);
+out:
+ if (unlikely(err))
+ pr_err("debugfs/aufs failed\n");
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_fin(void)
+{
+ debugfs_remove(dbgaufs);
+}
+
+int __init dbgaufs_init(void)
+{
+ int err;
+
+ err = -EIO;
+ dbgaufs = debugfs_create_dir(AUFS_NAME, NULL);
+ if (dbgaufs)
+ err = 0;
+ return err;
+}
diff --git a/fs/aufs/dbgaufs.h b/fs/aufs/dbgaufs.h
new file mode 100644
index 000000000000..1ea241bf0234
--- /dev/null
+++ b/fs/aufs/dbgaufs.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debugfs interface
+ */
+
+#ifndef __DBGAUFS_H__
+#define __DBGAUFS_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+struct au_sbinfo;
+struct au_branch;
+
+#ifdef CONFIG_DEBUG_FS
+/* dbgaufs.c */
+void dbgaufs_xino_del(struct au_branch *br);
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex, int topdown);
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo);
+int dbgaufs_si_init(struct au_sbinfo *sbinfo);
+void dbgaufs_fin(void);
+int __init dbgaufs_init(void);
+#else
+AuStubVoid(dbgaufs_xino_del, struct au_branch *br)
+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex,
+ int topdown)
+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo)
+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo)
+AuStubVoid(dbgaufs_fin, void)
+AuStubInt0(__init dbgaufs_init, void)
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __KERNEL__ */
+#endif /* __DBGAUFS_H__ */
diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c
new file mode 100644
index 000000000000..0b9122307667
--- /dev/null
+++ b/fs/aufs/dcsub.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#include "aufs.h"
+
+static void au_dpage_free(struct au_dpage *dpage)
+{
+ int i;
+ struct dentry **p;
+
+ p = dpage->dentries;
+ for (i = 0; i < dpage->ndentry; i++)
+ dput(*p++);
+ free_page((unsigned long)dpage->dentries);
+}
+
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp)
+{
+ int err;
+ void *p;
+
+ err = -ENOMEM;
+ dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp);
+ if (unlikely(!dpages->dpages))
+ goto out;
+
+ p = (void *)__get_free_page(gfp);
+ if (unlikely(!p))
+ goto out_dpages;
+
+ dpages->dpages[0].ndentry = 0;
+ dpages->dpages[0].dentries = p;
+ dpages->ndpage = 1;
+ return 0; /* success */
+
+out_dpages:
+ au_kfree_try_rcu(dpages->dpages);
+out:
+ return err;
+}
+
+void au_dpages_free(struct au_dcsub_pages *dpages)
+{
+ int i;
+ struct au_dpage *p;
+
+ p = dpages->dpages;
+ for (i = 0; i < dpages->ndpage; i++)
+ au_dpage_free(p++);
+ au_kfree_try_rcu(dpages->dpages);
+}
+
+static int au_dpages_append(struct au_dcsub_pages *dpages,
+ struct dentry *dentry, gfp_t gfp)
+{
+ int err, sz;
+ struct au_dpage *dpage;
+ void *p;
+
+ dpage = dpages->dpages + dpages->ndpage - 1;
+ sz = PAGE_SIZE / sizeof(dentry);
+ if (unlikely(dpage->ndentry >= sz)) {
+ AuLabel(new dpage);
+ err = -ENOMEM;
+ sz = dpages->ndpage * sizeof(*dpages->dpages);
+ p = au_kzrealloc(dpages->dpages, sz,
+ sz + sizeof(*dpages->dpages), gfp,
+ /*may_shrink*/0);
+ if (unlikely(!p))
+ goto out;
+
+ dpages->dpages = p;
+ dpage = dpages->dpages + dpages->ndpage;
+ p = (void *)__get_free_page(gfp);
+ if (unlikely(!p))
+ goto out;
+
+ dpage->ndentry = 0;
+ dpage->dentries = p;
+ dpages->ndpage++;
+ }
+
+ AuDebugOn(au_dcount(dentry) <= 0);
+ dpage->dentries[dpage->ndentry++] = dget_dlock(dentry);
+ return 0; /* success */
+
+out:
+ return err;
+}
+
+/* todo: BAD approach */
+/* copied from linux/fs/dcache.c */
+enum d_walk_ret {
+ D_WALK_CONTINUE,
+ D_WALK_QUIT,
+ D_WALK_NORETRY,
+ D_WALK_SKIP,
+};
+
+extern void d_walk(struct dentry *parent, void *data,
+ enum d_walk_ret (*enter)(void *, struct dentry *));
+
+struct ac_dpages_arg {
+ int err;
+ struct au_dcsub_pages *dpages;
+ struct super_block *sb;
+ au_dpages_test test;
+ void *arg;
+};
+
+static enum d_walk_ret au_call_dpages_append(void *_arg, struct dentry *dentry)
+{
+ enum d_walk_ret ret;
+ struct ac_dpages_arg *arg = _arg;
+
+ ret = D_WALK_CONTINUE;
+ if (dentry->d_sb == arg->sb
+ && !IS_ROOT(dentry)
+ && au_dcount(dentry) > 0
+ && au_di(dentry)
+ && (!arg->test || arg->test(dentry, arg->arg))) {
+ arg->err = au_dpages_append(arg->dpages, dentry, GFP_ATOMIC);
+ if (unlikely(arg->err))
+ ret = D_WALK_QUIT;
+ }
+
+ return ret;
+}
+
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+ au_dpages_test test, void *arg)
+{
+ struct ac_dpages_arg args = {
+ .err = 0,
+ .dpages = dpages,
+ .sb = root->d_sb,
+ .test = test,
+ .arg = arg
+ };
+
+ d_walk(root, &args, au_call_dpages_append);
+
+ return args.err;
+}
+
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+ int do_include, au_dpages_test test, void *arg)
+{
+ int err;
+
+ err = 0;
+ write_seqlock(&rename_lock);
+ spin_lock(&dentry->d_lock);
+ if (do_include
+ && au_dcount(dentry) > 0
+ && (!test || test(dentry, arg)))
+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+ spin_unlock(&dentry->d_lock);
+ if (unlikely(err))
+ goto out;
+
+ /*
+	 * RCU for vfsmount is unnecessary since this is a traversal within a
+	 * single mount
+ */
+ while (!IS_ROOT(dentry)) {
+ dentry = dentry->d_parent; /* rename_lock is locked */
+ spin_lock(&dentry->d_lock);
+ if (au_dcount(dentry) > 0
+ && (!test || test(dentry, arg)))
+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+ spin_unlock(&dentry->d_lock);
+ if (unlikely(err))
+ break;
+ }
+
+out:
+ write_sequnlock(&rename_lock);
+ return err;
+}
+
+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg)
+{
+ return au_di(dentry) && dentry->d_sb == arg;
+}
+
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+ struct dentry *dentry, int do_include)
+{
+ return au_dcsub_pages_rev(dpages, dentry, do_include,
+ au_dcsub_dpages_aufs, dentry->d_sb);
+}
+
+int au_test_subdir(struct dentry *d1, struct dentry *d2)
+{
+ struct path path[2] = {
+ {
+ .dentry = d1
+ },
+ {
+ .dentry = d2
+ }
+ };
+
+ return path_is_under(path + 0, path + 1);
+}
diff --git a/fs/aufs/dcsub.h b/fs/aufs/dcsub.h
new file mode 100644
index 000000000000..5e578995c256
--- /dev/null
+++ b/fs/aufs/dcsub.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#ifndef __AUFS_DCSUB_H__
+#define __AUFS_DCSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/fs.h>
+
+struct au_dpage {
+ int ndentry;
+ struct dentry **dentries;
+};
+
+struct au_dcsub_pages {
+ int ndpage;
+ struct au_dpage *dpages;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dcsub.c */
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp);
+void au_dpages_free(struct au_dcsub_pages *dpages);
+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg);
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+ au_dpages_test test, void *arg);
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+ int do_include, au_dpages_test test, void *arg);
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+ struct dentry *dentry, int do_include);
+int au_test_subdir(struct dentry *d1, struct dentry *d2);
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * todo: in linux-3.13, several similar (but faster) helpers were added to
+ * include/linux/dcache.h. Try them (in the future).
+ */
+
+static inline int au_d_hashed_positive(struct dentry *d)
+{
+ int err;
+ struct inode *inode = d_inode(d);
+
+ err = 0;
+ if (unlikely(d_unhashed(d)
+ || d_is_negative(d)
+ || !inode->i_nlink))
+ err = -ENOENT;
+ return err;
+}
+
+static inline int au_d_linkable(struct dentry *d)
+{
+ int err;
+ struct inode *inode = d_inode(d);
+
+ err = au_d_hashed_positive(d);
+ if (err
+ && d_is_positive(d)
+ && (inode->i_state & I_LINKABLE))
+ err = 0;
+ return err;
+}
+
+static inline int au_d_alive(struct dentry *d)
+{
+ int err;
+ struct inode *inode;
+
+ err = 0;
+ if (!IS_ROOT(d))
+ err = au_d_hashed_positive(d);
+ else {
+ inode = d_inode(d);
+ if (unlikely(d_unlinked(d)
+ || d_is_negative(d)
+ || !inode->i_nlink))
+ err = -ENOENT;
+ }
+ return err;
+}
+
+static inline int au_alive_dir(struct dentry *d)
+{
+ int err;
+
+ err = au_d_alive(d);
+ if (unlikely(err || IS_DEADDIR(d_inode(d))))
+ err = -ENOENT;
+ return err;
+}
+
+static inline int au_qstreq(struct qstr *a, struct qstr *b)
+{
+ return a->len == b->len
+ && !memcmp(a->name, b->name, a->len);
+}
+
+/*
+ * by the commit
+ * 360f547 2015-01-25 dcache: let the dentry count go down to zero without
+ * taking d_lock
+ * the type of d_lockref.count became int, but the inlined function d_count()
+ * still returns unsigned int.
+ * I don't know why. Maybe it is for every d_count() user?
+ * Anyway au_dcount() lives on.
+ */
+static inline int au_dcount(struct dentry *d)
+{
+ return (int)d_count(d);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DCSUB_H__ */
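
The au_d_*() and au_alive_dir() helpers above share one convention: they return 0 when the dentry is usable and -ENOENT otherwise, so they can be dropped in as early-exit guards. A short sketch under that assumption; example_dir_op() is hypothetical and not part of aufs:

static int example_dir_op(struct dentry *dir)
{
	int err;

	err = au_alive_dir(dir);	/* 0, or -ENOENT if unhashed/unlinked/dead */
	if (unlikely(err))
		return err;
	/* here d_inode(dir) is positive, linked and not IS_DEADDIR() */
	return 0;
}
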
diff --git a/fs/aufs/debug.c b/fs/aufs/debug.c
new file mode 100644
index 000000000000..8fa0575a08a8
--- /dev/null
+++ b/fs/aufs/debug.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debug print functions
+ */
+
+#include "aufs.h"
+
+/* Returns 0, or -errno. arg is in kp->arg. */
+static int param_atomic_t_set(const char *val, const struct kernel_param *kp)
+{
+ int err, n;
+
+ err = kstrtoint(val, 0, &n);
+ if (!err) {
+ if (n > 0)
+ au_debug_on();
+ else
+ au_debug_off();
+ }
+ return err;
+}
+
+/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
+static int param_atomic_t_get(char *buffer, const struct kernel_param *kp)
+{
+ atomic_t *a;
+
+ a = kp->arg;
+ return sprintf(buffer, "%d", atomic_read(a));
+}
+
+static struct kernel_param_ops param_ops_atomic_t = {
+ .set = param_atomic_t_set,
+ .get = param_atomic_t_get
+ /* void (*free)(void *arg) */
+};
+
+atomic_t aufs_debug = ATOMIC_INIT(0);
+MODULE_PARM_DESC(debug, "debug print");
+module_param_named(debug, aufs_debug, atomic_t, 0664);
+
+DEFINE_MUTEX(au_dbg_mtx); /* just to serialize the dbg msgs */
+char *au_plevel = KERN_DEBUG;
+#define dpri(fmt, ...) do { \
+ if ((au_plevel \
+ && strcmp(au_plevel, KERN_DEBUG)) \
+ || au_debug_test()) \
+ printk("%s" fmt, au_plevel, ##__VA_ARGS__); \
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+void au_dpri_whlist(struct au_nhash *whlist)
+{
+ unsigned long ul, n;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+
+ n = whlist->nh_num;
+ head = whlist->nh_head;
+ for (ul = 0; ul < n; ul++) {
+ hlist_for_each_entry(pos, head, wh_hash)
+ dpri("b%d, %.*s, %d\n",
+ pos->wh_bindex,
+ pos->wh_str.len, pos->wh_str.name,
+ pos->wh_str.len);
+ head++;
+ }
+}
+
+void au_dpri_vdir(struct au_vdir *vdir)
+{
+ unsigned long ul;
+ union au_vdir_deblk_p p;
+ unsigned char *o;
+
+ if (!vdir || IS_ERR(vdir)) {
+ dpri("err %ld\n", PTR_ERR(vdir));
+ return;
+ }
+
+ dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %llu\n",
+ vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk,
+ vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version);
+ for (ul = 0; ul < vdir->vd_nblk; ul++) {
+ p.deblk = vdir->vd_deblk[ul];
+ o = p.deblk;
+ dpri("[%lu]: %p\n", ul, o);
+ }
+}
+
+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn,
+ struct dentry *wh)
+{
+ char *n = NULL;
+ int l = 0;
+
+ if (!inode || IS_ERR(inode)) {
+ dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
+ return -1;
+ }
+
+ /* the type of i_blocks depends upon CONFIG_LBDAF */
+ BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
+ && sizeof(inode->i_blocks) != sizeof(u64));
+ if (wh) {
+ n = (void *)wh->d_name.name;
+ l = wh->d_name.len;
+ }
+
+ dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
+ " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n",
+ bindex, inode,
+ inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
+ atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
+ i_size_read(inode), (unsigned long long)inode->i_blocks,
+ hn, (long long)timespec64_to_ns(&inode->i_ctime) & 0x0ffff,
+ inode->i_mapping ? inode->i_mapping->nrpages : 0,
+ inode->i_state, inode->i_flags, inode_peek_iversion(inode),
+ inode->i_generation,
+ l ? ", wh " : "", l, n);
+ return 0;
+}
+
+void au_dpri_inode(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ struct au_hinode *hi;
+ aufs_bindex_t bindex;
+ int err, hn;
+
+ err = do_pri_inode(-1, inode, -1, NULL);
+ if (err || !au_test_aufs(inode->i_sb) || au_is_bad_inode(inode))
+ return;
+
+ iinfo = au_ii(inode);
+ dpri("i-1: btop %d, bbot %d, gen %d\n",
+ iinfo->ii_btop, iinfo->ii_bbot, au_iigen(inode, NULL));
+ if (iinfo->ii_btop < 0)
+ return;
+ hn = 0;
+ for (bindex = iinfo->ii_btop; bindex <= iinfo->ii_bbot; bindex++) {
+ hi = au_hinode(iinfo, bindex);
+ hn = !!au_hn(hi);
+ do_pri_inode(bindex, hi->hi_inode, hn, hi->hi_whdentry);
+ }
+}
+
+void au_dpri_dalias(struct inode *inode)
+{
+ struct dentry *d;
+
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias)
+ au_dpri_dentry(d);
+ spin_unlock(&inode->i_lock);
+}
+
+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry)
+{
+ struct dentry *wh = NULL;
+ int hn;
+ struct inode *inode;
+ struct au_iinfo *iinfo;
+ struct au_hinode *hi;
+
+ if (!dentry || IS_ERR(dentry)) {
+ dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry));
+ return -1;
+ }
+ /* do not call dget_parent() here */
+ /* note: access d_xxx without d_lock */
+ dpri("d%d: %p, %pd2?, %s, cnt %d, flags 0x%x, %shashed\n",
+ bindex, dentry, dentry,
+ dentry->d_sb ? au_sbtype(dentry->d_sb) : "??",
+ au_dcount(dentry), dentry->d_flags,
+ d_unhashed(dentry) ? "un" : "");
+ hn = -1;
+ inode = NULL;
+ if (d_is_positive(dentry))
+ inode = d_inode(dentry);
+ if (inode
+ && au_test_aufs(dentry->d_sb)
+ && bindex >= 0
+ && !au_is_bad_inode(inode)) {
+ iinfo = au_ii(inode);
+ hi = au_hinode(iinfo, bindex);
+ hn = !!au_hn(hi);
+ wh = hi->hi_whdentry;
+ }
+ do_pri_inode(bindex, inode, hn, wh);
+ return 0;
+}
+
+void au_dpri_dentry(struct dentry *dentry)
+{
+ struct au_dinfo *dinfo;
+ aufs_bindex_t bindex;
+ int err;
+
+ err = do_pri_dentry(-1, dentry);
+ if (err || !au_test_aufs(dentry->d_sb))
+ return;
+
+ dinfo = au_di(dentry);
+ if (!dinfo)
+ return;
+ dpri("d-1: btop %d, bbot %d, bwh %d, bdiropq %d, gen %d, tmp %d\n",
+ dinfo->di_btop, dinfo->di_bbot,
+ dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry),
+ dinfo->di_tmpfile);
+ if (dinfo->di_btop < 0)
+ return;
+ for (bindex = dinfo->di_btop; bindex <= dinfo->di_bbot; bindex++)
+ do_pri_dentry(bindex, au_hdentry(dinfo, bindex)->hd_dentry);
+}
+
+static int do_pri_file(aufs_bindex_t bindex, struct file *file)
+{
+ char a[32];
+
+ if (!file || IS_ERR(file)) {
+ dpri("f%d: err %ld\n", bindex, PTR_ERR(file));
+ return -1;
+ }
+ a[0] = 0;
+ if (bindex < 0
+ && !IS_ERR_OR_NULL(file->f_path.dentry)
+ && au_test_aufs(file->f_path.dentry->d_sb)
+ && au_fi(file))
+ snprintf(a, sizeof(a), ", gen %d, mmapped %d",
+ au_figen(file), atomic_read(&au_fi(file)->fi_mmapped));
+ dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n",
+ bindex, file->f_mode, file->f_flags, (long)file_count(file),
+ file->f_version, file->f_pos, a);
+ if (!IS_ERR_OR_NULL(file->f_path.dentry))
+ do_pri_dentry(bindex, file->f_path.dentry);
+ return 0;
+}
+
+void au_dpri_file(struct file *file)
+{
+ struct au_finfo *finfo;
+ struct au_fidir *fidir;
+ struct au_hfile *hfile;
+ aufs_bindex_t bindex;
+ int err;
+
+ err = do_pri_file(-1, file);
+ if (err
+ || IS_ERR_OR_NULL(file->f_path.dentry)
+ || !au_test_aufs(file->f_path.dentry->d_sb))
+ return;
+
+ finfo = au_fi(file);
+ if (!finfo)
+ return;
+ if (finfo->fi_btop < 0)
+ return;
+ fidir = finfo->fi_hdir;
+ if (!fidir)
+ do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file);
+ else
+ for (bindex = finfo->fi_btop;
+ bindex >= 0 && bindex <= fidir->fd_bbot;
+ bindex++) {
+ hfile = fidir->fd_hfile + bindex;
+ do_pri_file(bindex, hfile ? hfile->hf_file : NULL);
+ }
+}
+
+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br)
+{
+ struct vfsmount *mnt;
+ struct super_block *sb;
+
+ if (!br || IS_ERR(br))
+ goto out;
+ mnt = au_br_mnt(br);
+ if (!mnt || IS_ERR(mnt))
+ goto out;
+ sb = mnt->mnt_sb;
+ if (!sb || IS_ERR(sb))
+ goto out;
+
+ dpri("s%d: {perm 0x%x, id %d, wbr %p}, "
+ "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, "
+ "xino %d\n",
+ bindex, br->br_perm, br->br_id, br->br_wbr,
+ au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ sb->s_flags, sb->s_count,
+ atomic_read(&sb->s_active),
+ !!au_xino_file(br->br_xino, /*idx*/-1));
+ return 0;
+
+out:
+ dpri("s%d: err %ld\n", bindex, PTR_ERR(br));
+ return -1;
+}
+
+void au_dpri_sb(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+ aufs_bindex_t bindex;
+ int err;
+ /* to reduce stack size */
+ struct {
+ struct vfsmount mnt;
+ struct au_branch fake;
+ } *a;
+
+ /* this function can be called from magic sysrq */
+ a = kzalloc(sizeof(*a), GFP_ATOMIC);
+ if (unlikely(!a)) {
+ dpri("no memory\n");
+ return;
+ }
+
+ a->mnt.mnt_sb = sb;
+ a->fake.br_path.mnt = &a->mnt;
+ err = do_pri_br(-1, &a->fake);
+ au_kfree_rcu(a);
+ dpri("dev 0x%x\n", sb->s_dev);
+ if (err || !au_test_aufs(sb))
+ return;
+
+ sbinfo = au_sbi(sb);
+ if (!sbinfo)
+ return;
+ dpri("nw %d, gen %u, kobj %d\n",
+ atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
+ kref_read(&sbinfo->si_kobj.kref));
+ for (bindex = 0; bindex <= sbinfo->si_bbot; bindex++)
+ do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line)
+{
+ struct inode *h_inode, *inode = d_inode(dentry);
+ struct dentry *h_dentry;
+ aufs_bindex_t bindex, bbot, bi;
+
+ if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */)
+ return;
+
+ bbot = au_dbbot(dentry);
+ bi = au_ibbot(inode);
+ if (bi < bbot)
+ bbot = bi;
+ bindex = au_dbtop(dentry);
+ bi = au_ibtop(inode);
+ if (bi > bindex)
+ bindex = bi;
+
+ for (; bindex <= bbot; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ h_inode = au_h_iptr(inode, bindex);
+ if (unlikely(h_inode != d_inode(h_dentry))) {
+ au_debug_on();
+ AuDbg("b%d, %s:%d\n", bindex, func, line);
+ AuDbgDentry(dentry);
+ AuDbgInode(inode);
+ au_debug_off();
+ BUG();
+ }
+ }
+}
+
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
+{
+ int err, i, j;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ AuDebugOn(err);
+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1);
+ AuDebugOn(err);
+ for (i = dpages.ndpage - 1; !err && i >= 0; i--) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ for (j = dpage->ndentry - 1; !err && j >= 0; j--)
+ AuDebugOn(au_digen_test(dentries[j], sigen));
+ }
+ au_dpages_free(&dpages);
+}
+
+void au_dbg_verify_kthread(void)
+{
+ if (au_wkq_test()) {
+ au_dbg_blocked();
+ /*
+ * It may be recursive, but udba=notify between two aufs mounts,
+ * where a single ro branch is shared, is not a problem.
+ */
+ /* WARN_ON(1); */
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+int __init au_debug_init(void)
+{
+ aufs_bindex_t bindex;
+ struct au_vdir_destr destr;
+
+ bindex = -1;
+ AuDebugOn(bindex >= 0);
+
+ destr.len = -1;
+ AuDebugOn(destr.len < NAME_MAX);
+
+#ifdef CONFIG_4KSTACKS
+ pr_warn("CONFIG_4KSTACKS is defined.\n");
+#endif
+
+ return 0;
+}
diff --git a/fs/aufs/debug.h b/fs/aufs/debug.h
new file mode 100644
index 000000000000..9c52470a61fb
--- /dev/null
+++ b/fs/aufs/debug.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debug print functions
+ */
+
+#ifndef __AUFS_DEBUG_H__
+#define __AUFS_DEBUG_H__
+
+#ifdef __KERNEL__
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/sysrq.h>
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDebugOn(a) BUG_ON(a)
+
+/* module parameter */
+extern atomic_t aufs_debug;
+static inline void au_debug_on(void)
+{
+ atomic_inc(&aufs_debug);
+}
+static inline void au_debug_off(void)
+{
+ atomic_dec_if_positive(&aufs_debug);
+}
+
+static inline int au_debug_test(void)
+{
+ return atomic_read(&aufs_debug) > 0;
+}
+#else
+#define AuDebugOn(a) do {} while (0)
+AuStubVoid(au_debug_on, void)
+AuStubVoid(au_debug_off, void)
+AuStubInt0(au_debug_test, void)
+#endif /* CONFIG_AUFS_DEBUG */
+
+#define param_check_atomic_t(name, p) __param_check(name, p, atomic_t)
+
+/* ---------------------------------------------------------------------- */
+
+/* debug print */
+
+#define AuDbg(fmt, ...) do { \
+ if (au_debug_test()) \
+ pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \
+} while (0)
+#define AuLabel(l) AuDbg(#l "\n")
+#define AuIOErr(fmt, ...) pr_err("I/O Error, " fmt, ##__VA_ARGS__)
+#define AuWarn1(fmt, ...) do { \
+ static unsigned char _c; \
+ if (!_c++) \
+ pr_warn(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuErr1(fmt, ...) do { \
+ static unsigned char _c; \
+ if (!_c++) \
+ pr_err(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuIOErr1(fmt, ...) do { \
+ static unsigned char _c; \
+ if (!_c++) \
+ AuIOErr(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuUnsupportMsg "This operation is not supported." \
+ " Please report this application to aufs-users ML."
+#define AuUnsupport(fmt, ...) do { \
+ pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \
+ dump_stack(); \
+} while (0)
+
+#define AuTraceErr(e) do { \
+ if (unlikely((e) < 0)) \
+ AuDbg("err %d\n", (int)(e)); \
+} while (0)
+
+#define AuTraceErrPtr(p) do { \
+ if (IS_ERR(p)) \
+ AuDbg("err %ld\n", PTR_ERR(p)); \
+} while (0)
+
+/* dirty macros for debug print, use with "%.*s" and caution */
+#define AuLNPair(qstr) (qstr)->len, (qstr)->name
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry;
+#ifdef CONFIG_AUFS_DEBUG
+extern struct mutex au_dbg_mtx;
+extern char *au_plevel;
+struct au_nhash;
+void au_dpri_whlist(struct au_nhash *whlist);
+struct au_vdir;
+void au_dpri_vdir(struct au_vdir *vdir);
+struct inode;
+void au_dpri_inode(struct inode *inode);
+void au_dpri_dalias(struct inode *inode);
+void au_dpri_dentry(struct dentry *dentry);
+struct file;
+void au_dpri_file(struct file *filp);
+struct super_block;
+void au_dpri_sb(struct super_block *sb);
+
+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__)
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line);
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen);
+void au_dbg_verify_kthread(void);
+
+int __init au_debug_init(void);
+
+#define AuDbgWhlist(w) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#w "\n"); \
+ au_dpri_whlist(w); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgVdir(v) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#v "\n"); \
+ au_dpri_vdir(v); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgInode(i) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#i "\n"); \
+ au_dpri_inode(i); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgDAlias(i) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#i "\n"); \
+ au_dpri_dalias(i); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgDentry(d) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#d "\n"); \
+ au_dpri_dentry(d); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgFile(f) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#f "\n"); \
+ au_dpri_file(f); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgSb(sb) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#sb "\n"); \
+ au_dpri_sb(sb); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgSym(addr) do { \
+ char sym[KSYM_SYMBOL_LEN]; \
+ sprint_symbol(sym, (unsigned long)addr); \
+ AuDbg("%s\n", sym); \
+} while (0)
+#else
+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry)
+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen)
+AuStubVoid(au_dbg_verify_kthread, void)
+AuStubInt0(__init au_debug_init, void)
+
+#define AuDbgWhlist(w) do {} while (0)
+#define AuDbgVdir(v) do {} while (0)
+#define AuDbgInode(i) do {} while (0)
+#define AuDbgDAlias(i) do {} while (0)
+#define AuDbgDentry(d) do {} while (0)
+#define AuDbgFile(f) do {} while (0)
+#define AuDbgSb(sb) do {} while (0)
+#define AuDbgSym(addr) do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+int __init au_sysrq_init(void);
+void au_sysrq_fin(void);
+
+#ifdef CONFIG_HW_CONSOLE
+#define au_dbg_blocked() do { \
+ WARN_ON(1); \
+ handle_sysrq('w'); \
+} while (0)
+#else
+AuStubVoid(au_dbg_blocked, void)
+#endif
+
+#else
+AuStubInt0(__init au_sysrq_init, void)
+AuStubVoid(au_sysrq_fin, void)
+AuStubVoid(au_dbg_blocked, void)
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DEBUG_H__ */
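
Because aufs_debug is exposed through module_param_named(debug, ...) in debug.c, the prints guarded by au_debug_test() can be toggled at runtime (typically via /sys/module/aufs/parameters/debug) in addition to the internal au_debug_on()/au_debug_off() calls. A brief sketch of how the macros above are meant to be combined; example_check() is hypothetical and not part of aufs:

static int example_check(struct dentry *dentry, int err)
{
	AuDbg("%pd\n", dentry);		/* emitted only while aufs_debug > 0 */
	if (unlikely(err)) {
		AuWarn1("example: reporting only the first failure, err %d\n",
			err);		/* printed for the first occurrence */
		AuDbgDentry(dentry);	/* full dump, serialized by au_dbg_mtx */
	}
	AuTraceErr(err);		/* logs "err N" via AuDbg() when err < 0 */
	return err;
}
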
diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c
new file mode 100644
index 000000000000..26b5fead4939
--- /dev/null
+++ b/fs/aufs/dentry.c
@@ -0,0 +1,1153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#include <linux/namei.h>
+#include "aufs.h"
+
+/*
+ * returns positive/negative dentry, NULL or an error.
+ * NULL means whiteout-ed or not-found.
+ */
+static struct dentry*
+au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
+ aufs_bindex_t bindex, struct au_do_lookup_args *args)
+{
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ struct au_branch *br;
+ int wh_found, opq;
+ unsigned char wh_able;
+ const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);
+ const unsigned char ignore_perm = !!au_ftest_lkup(args->flags,
+ IGNORE_PERM);
+
+ wh_found = 0;
+ br = au_sbr(dentry->d_sb, bindex);
+ wh_able = !!au_br_whable(br->br_perm);
+ if (wh_able)
+ wh_found = au_wh_test(h_parent, &args->whname, ignore_perm);
+ h_dentry = ERR_PTR(wh_found);
+ if (!wh_found)
+ goto real_lookup;
+ if (unlikely(wh_found < 0))
+ goto out;
+
+ /* We found a whiteout */
+ /* au_set_dbbot(dentry, bindex); */
+ au_set_dbwh(dentry, bindex);
+ if (!allow_neg)
+ return NULL; /* success */
+
+real_lookup:
+ if (!ignore_perm)
+ h_dentry = vfsub_lkup_one(args->name, h_parent);
+ else
+ h_dentry = au_sio_lkup_one(args->name, h_parent);
+ if (IS_ERR(h_dentry)) {
+ if (PTR_ERR(h_dentry) == -ENAMETOOLONG
+ && !allow_neg)
+ h_dentry = NULL;
+ goto out;
+ }
+
+ h_inode = d_inode(h_dentry);
+ if (d_is_negative(h_dentry)) {
+ if (!allow_neg)
+ goto out_neg;
+ } else if (wh_found
+ || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
+ goto out_neg;
+ else if (au_ftest_lkup(args->flags, DIRREN)
+ /* && h_inode */
+ && !au_dr_lkup_h_ino(args, bindex, h_inode->i_ino)) {
+ AuDbg("b%d %pd ignored hi%llu\n", bindex, h_dentry,
+ (unsigned long long)h_inode->i_ino);
+ goto out_neg;
+ }
+
+ if (au_dbbot(dentry) <= bindex)
+ au_set_dbbot(dentry, bindex);
+ if (au_dbtop(dentry) < 0 || bindex < au_dbtop(dentry))
+ au_set_dbtop(dentry, bindex);
+ au_set_h_dptr(dentry, bindex, h_dentry);
+
+ if (!d_is_dir(h_dentry)
+ || !wh_able
+ || (d_really_is_positive(dentry) && !d_is_dir(dentry)))
+ goto out; /* success */
+
+ inode_lock_shared_nested(h_inode, AuLsc_I_CHILD);
+ opq = au_diropq_test(h_dentry);
+ inode_unlock_shared(h_inode);
+ if (opq > 0)
+ au_set_dbdiropq(dentry, bindex);
+ else if (unlikely(opq < 0)) {
+ au_set_h_dptr(dentry, bindex, NULL);
+ h_dentry = ERR_PTR(opq);
+ }
+ goto out;
+
+out_neg:
+ dput(h_dentry);
+ h_dentry = NULL;
+out:
+ return h_dentry;
+}
+
+static int au_test_shwh(struct super_block *sb, const struct qstr *name)
+{
+ if (unlikely(!au_opt_test(au_mntflags(sb), SHWH)
+ && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)))
+ return -EPERM;
+ return 0;
+}
+
+/*
+ * returns the number of lower positive dentries,
+ * otherwise an error.
+ * can be called at unlinking; in that case @type is zero.
+ */
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t btop,
+ unsigned int flags)
+{
+ int npositive, err;
+ aufs_bindex_t bindex, btail, bdiropq;
+ unsigned char isdir, dirperm1, dirren;
+ struct au_do_lookup_args args = {
+ .flags = flags,
+ .name = &dentry->d_name
+ };
+ struct dentry *parent;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ err = au_test_shwh(sb, args.name);
+ if (unlikely(err))
+ goto out;
+
+ err = au_wh_name_alloc(&args.whname, args.name);
+ if (unlikely(err))
+ goto out;
+
+ isdir = !!d_is_dir(dentry);
+ dirperm1 = !!au_opt_test(au_mntflags(sb), DIRPERM1);
+ dirren = !!au_opt_test(au_mntflags(sb), DIRREN);
+ if (dirren)
+ au_fset_lkup(args.flags, DIRREN);
+
+ npositive = 0;
+ parent = dget_parent(dentry);
+ btail = au_dbtaildir(parent);
+ for (bindex = btop; bindex <= btail; bindex++) {
+ struct dentry *h_parent, *h_dentry;
+ struct inode *h_inode, *h_dir;
+ struct au_branch *br;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry) {
+ if (d_is_positive(h_dentry))
+ npositive++;
+ break;
+ }
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || !d_is_dir(h_parent))
+ continue;
+
+ if (dirren) {
+ /* if the inum matches, then use the prepared name */
+ err = au_dr_lkup_name(&args, bindex);
+ if (unlikely(err))
+ goto out_parent;
+ }
+
+ h_dir = d_inode(h_parent);
+ inode_lock_shared_nested(h_dir, AuLsc_I_PARENT);
+ h_dentry = au_do_lookup(h_parent, dentry, bindex, &args);
+ inode_unlock_shared(h_dir);
+ err = PTR_ERR(h_dentry);
+ if (IS_ERR(h_dentry))
+ goto out_parent;
+ if (h_dentry)
+ au_fclr_lkup(args.flags, ALLOW_NEG);
+ if (dirperm1)
+ au_fset_lkup(args.flags, IGNORE_PERM);
+
+ if (au_dbwh(dentry) == bindex)
+ break;
+ if (!h_dentry)
+ continue;
+ if (d_is_negative(h_dentry))
+ continue;
+ h_inode = d_inode(h_dentry);
+ npositive++;
+ if (!args.type)
+ args.type = h_inode->i_mode & S_IFMT;
+ if (args.type != S_IFDIR)
+ break;
+ else if (isdir) {
+ /* the type of lower may be different */
+ bdiropq = au_dbdiropq(dentry);
+ if (bdiropq >= 0 && bdiropq <= bindex)
+ break;
+ }
+ br = au_sbr(sb, bindex);
+ if (dirren
+ && au_dr_hino_test_add(&br->br_dirren, h_inode->i_ino,
+ /*add_ent*/NULL)) {
+ /* prepare next name to lookup */
+ err = au_dr_lkup(&args, dentry, bindex);
+ if (unlikely(err))
+ goto out_parent;
+ }
+ }
+
+ if (npositive) {
+ AuLabel(positive);
+ au_update_dbtop(dentry);
+ }
+ err = npositive;
+ if (unlikely(!au_opt_test(au_mntflags(sb), UDBA_NONE)
+ && au_dbtop(dentry) < 0)) {
+ err = -EIO;
+ AuIOErr("both of real entry and whiteout found, %pd, err %d\n",
+ dentry, err);
+ }
+
+out_parent:
+ dput(parent);
+ au_kfree_try_rcu(args.whname.name);
+ if (dirren)
+ au_dr_lkup_fin(&args);
+out:
+ return err;
+}
+
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+ int wkq_err;
+
+ if (!au_test_h_perm_sio(d_inode(parent), MAY_EXEC))
+ dentry = vfsub_lkup_one(name, parent);
+ else {
+ struct vfsub_lkup_one_args args = {
+ .errp = &dentry,
+ .name = name,
+ .parent = parent
+ };
+
+ wkq_err = au_wkq_wait(vfsub_call_lkup_one, &args);
+ if (unlikely(wkq_err))
+ dentry = ERR_PTR(wkq_err);
+ }
+
+ return dentry;
+}
+
+/*
+ * lookup @dentry on @bindex which should be negative.
+ */
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh)
+{
+ int err;
+ struct dentry *parent, *h_parent, *h_dentry;
+ struct au_branch *br;
+
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bindex);
+ br = au_sbr(dentry->d_sb, bindex);
+ if (wh)
+ h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+ else
+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent);
+ err = PTR_ERR(h_dentry);
+ if (IS_ERR(h_dentry))
+ goto out;
+ if (unlikely(d_is_positive(h_dentry))) {
+ err = -EIO;
+ AuIOErr("%pd should be negative on b%d.\n", h_dentry, bindex);
+ dput(h_dentry);
+ goto out;
+ }
+
+ err = 0;
+ if (bindex < au_dbtop(dentry))
+ au_set_dbtop(dentry, bindex);
+ if (au_dbbot(dentry) < bindex)
+ au_set_dbbot(dentry, bindex);
+ au_set_h_dptr(dentry, bindex, h_dentry);
+
+out:
+ dput(parent);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* subset of struct inode */
+struct au_iattr {
+ unsigned long i_ino;
+ /* unsigned int i_nlink; */
+ kuid_t i_uid;
+ kgid_t i_gid;
+ u64 i_version;
+/*
+ loff_t i_size;
+ blkcnt_t i_blocks;
+*/
+ umode_t i_mode;
+};
+
+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode)
+{
+ ia->i_ino = h_inode->i_ino;
+ /* ia->i_nlink = h_inode->i_nlink; */
+ ia->i_uid = h_inode->i_uid;
+ ia->i_gid = h_inode->i_gid;
+ ia->i_version = inode_query_iversion(h_inode);
+/*
+ ia->i_size = h_inode->i_size;
+ ia->i_blocks = h_inode->i_blocks;
+*/
+ ia->i_mode = (h_inode->i_mode & S_IFMT);
+}
+
+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode)
+{
+ return ia->i_ino != h_inode->i_ino
+ /* || ia->i_nlink != h_inode->i_nlink */
+ || !uid_eq(ia->i_uid, h_inode->i_uid)
+ || !gid_eq(ia->i_gid, h_inode->i_gid)
+ || !inode_eq_iversion(h_inode, ia->i_version)
+/*
+ || ia->i_size != h_inode->i_size
+ || ia->i_blocks != h_inode->i_blocks
+*/
+ || ia->i_mode != (h_inode->i_mode & S_IFMT);
+}
+
+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent,
+ struct au_branch *br)
+{
+ int err;
+ struct au_iattr ia;
+ struct inode *h_inode;
+ struct dentry *h_d;
+ struct super_block *h_sb;
+
+ err = 0;
+ memset(&ia, -1, sizeof(ia));
+ h_sb = h_dentry->d_sb;
+ h_inode = NULL;
+ if (d_is_positive(h_dentry)) {
+ h_inode = d_inode(h_dentry);
+ au_iattr_save(&ia, h_inode);
+ } else if (au_test_nfs(h_sb) || au_test_fuse(h_sb))
+ /* nfs d_revalidate may return 0 for a negative dentry */
+ /* fuse d_revalidate always returns 0 for a negative dentry */
+ goto out;
+
+ /* main purpose is namei.c:cached_lookup() and d_revalidate */
+ h_d = vfsub_lkup_one(&h_dentry->d_name, h_parent);
+ err = PTR_ERR(h_d);
+ if (IS_ERR(h_d))
+ goto out;
+
+ err = 0;
+ if (unlikely(h_d != h_dentry
+ || d_inode(h_d) != h_inode
+ || (h_inode && au_iattr_test(&ia, h_inode))))
+ err = au_busy_or_stale();
+ dput(h_d);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+ struct dentry *h_parent, struct au_branch *br)
+{
+ int err;
+
+ err = 0;
+ if (udba == AuOpt_UDBA_REVAL
+ && !au_test_fs_remote(h_dentry->d_sb)) {
+ IMustLock(h_dir);
+ err = (d_inode(h_dentry->d_parent) != h_dir);
+ } else if (udba != AuOpt_UDBA_NONE)
+ err = au_h_verify_dentry(h_dentry, h_parent, br);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent)
+{
+ int err;
+ aufs_bindex_t new_bindex, bindex, bbot, bwh, bdiropq;
+ struct au_hdentry tmp, *p, *q;
+ struct au_dinfo *dinfo;
+ struct super_block *sb;
+
+ DiMustWriteLock(dentry);
+
+ sb = dentry->d_sb;
+ dinfo = au_di(dentry);
+ bbot = dinfo->di_bbot;
+ bwh = dinfo->di_bwh;
+ bdiropq = dinfo->di_bdiropq;
+ bindex = dinfo->di_btop;
+ p = au_hdentry(dinfo, bindex);
+ for (; bindex <= bbot; bindex++, p++) {
+ if (!p->hd_dentry)
+ continue;
+
+ new_bindex = au_br_index(sb, p->hd_id);
+ if (new_bindex == bindex)
+ continue;
+
+ if (dinfo->di_bwh == bindex)
+ bwh = new_bindex;
+ if (dinfo->di_bdiropq == bindex)
+ bdiropq = new_bindex;
+ if (new_bindex < 0) {
+ au_hdput(p);
+ p->hd_dentry = NULL;
+ continue;
+ }
+
+ /* swap two lower dentries, and loop again */
+ q = au_hdentry(dinfo, new_bindex);
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ if (tmp.hd_dentry) {
+ bindex--;
+ p--;
+ }
+ }
+
+ dinfo->di_bwh = -1;
+ if (bwh >= 0 && bwh <= au_sbbot(sb) && au_sbr_whable(sb, bwh))
+ dinfo->di_bwh = bwh;
+
+ dinfo->di_bdiropq = -1;
+ if (bdiropq >= 0
+ && bdiropq <= au_sbbot(sb)
+ && au_sbr_whable(sb, bdiropq))
+ dinfo->di_bdiropq = bdiropq;
+
+ err = -EIO;
+ dinfo->di_btop = -1;
+ dinfo->di_bbot = -1;
+ bbot = au_dbbot(parent);
+ bindex = 0;
+ p = au_hdentry(dinfo, bindex);
+ for (; bindex <= bbot; bindex++, p++)
+ if (p->hd_dentry) {
+ dinfo->di_btop = bindex;
+ break;
+ }
+
+ if (dinfo->di_btop >= 0) {
+ bindex = bbot;
+ p = au_hdentry(dinfo, bindex);
+ for (; bindex >= 0; bindex--, p--)
+ if (p->hd_dentry) {
+ dinfo->di_bbot = bindex;
+ err = 0;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void au_do_hide(struct dentry *dentry)
+{
+ struct inode *inode;
+
+ if (d_really_is_positive(dentry)) {
+ inode = d_inode(dentry);
+ if (!d_is_dir(dentry)) {
+ if (inode->i_nlink && !d_unhashed(dentry))
+ drop_nlink(inode);
+ } else {
+ clear_nlink(inode);
+ /* stop next lookup */
+ inode->i_flags |= S_DEAD;
+ }
+ smp_mb(); /* necessary? */
+ }
+ d_drop(dentry);
+}
+
+static int au_hide_children(struct dentry *parent)
+{
+ int err, i, j, ndentry;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry *dentry;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, parent, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ /* in reverse order */
+ for (i = dpages.ndpage - 1; i >= 0; i--) {
+ dpage = dpages.dpages + i;
+ ndentry = dpage->ndentry;
+ for (j = ndentry - 1; j >= 0; j--) {
+ dentry = dpage->dentries[j];
+ if (dentry != parent)
+ au_do_hide(dentry);
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
+static void au_hide(struct dentry *dentry)
+{
+ int err;
+
+ AuDbgDentry(dentry);
+ if (d_is_dir(dentry)) {
+ /* shrink_dcache_parent(dentry); */
+ err = au_hide_children(dentry);
+ if (unlikely(err))
+ AuIOErr("%pd, failed hiding children, ignored %d\n",
+ dentry, err);
+ }
+ au_do_hide(dentry);
+}
+
+/*
+ * By adding a dirty branch, a cached dentry may be affected in various ways.
+ *
+ * a dirty branch is added
+ * - on the top of layers
+ * - in the middle of layers
+ * - to the bottom of layers
+ *
+ * on the added branch there exists
+ * - a whiteout
+ * - a diropq
+ * - a same named entry
+ * + exist
+ * * negative --> positive
+ * * positive --> positive
+ * - type is unchanged
+ * - type is changed
+ * + doesn't exist
+ * * negative --> negative
+ * * positive --> negative (rejected by au_br_del() for non-dir case)
+ * - none
+ */
+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo,
+ struct au_dinfo *tmp)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ struct {
+ struct dentry *dentry;
+ struct inode *inode;
+ mode_t mode;
+ } orig_h, tmp_h = {
+ .dentry = NULL
+ };
+ struct au_hdentry *hd;
+ struct inode *inode, *h_inode;
+ struct dentry *h_dentry;
+
+ err = 0;
+ AuDebugOn(dinfo->di_btop < 0);
+ orig_h.mode = 0;
+ orig_h.dentry = au_hdentry(dinfo, dinfo->di_btop)->hd_dentry;
+ orig_h.inode = NULL;
+ if (d_is_positive(orig_h.dentry)) {
+ orig_h.inode = d_inode(orig_h.dentry);
+ orig_h.mode = orig_h.inode->i_mode & S_IFMT;
+ }
+ if (tmp->di_btop >= 0) {
+ tmp_h.dentry = au_hdentry(tmp, tmp->di_btop)->hd_dentry;
+ if (d_is_positive(tmp_h.dentry)) {
+ tmp_h.inode = d_inode(tmp_h.dentry);
+ tmp_h.mode = tmp_h.inode->i_mode & S_IFMT;
+ }
+ }
+
+ inode = NULL;
+ if (d_really_is_positive(dentry))
+ inode = d_inode(dentry);
+ if (!orig_h.inode) {
+ AuDbg("negative originally\n");
+ if (inode) {
+ au_hide(dentry);
+ goto out;
+ }
+ AuDebugOn(inode);
+ AuDebugOn(dinfo->di_btop != dinfo->di_bbot);
+ AuDebugOn(dinfo->di_bdiropq != -1);
+
+ if (!tmp_h.inode) {
+ AuDbg("negative --> negative\n");
+ /* should have only one negative lower */
+ if (tmp->di_btop >= 0
+ && tmp->di_btop < dinfo->di_btop) {
+ AuDebugOn(tmp->di_btop != tmp->di_bbot);
+ AuDebugOn(dinfo->di_btop != dinfo->di_bbot);
+ au_set_h_dptr(dentry, dinfo->di_btop, NULL);
+ au_di_cp(dinfo, tmp);
+ hd = au_hdentry(tmp, tmp->di_btop);
+ au_set_h_dptr(dentry, tmp->di_btop,
+ dget(hd->hd_dentry));
+ }
+ au_dbg_verify_dinode(dentry);
+ } else {
+ AuDbg("negative --> positive\n");
+ /*
+ * similar to the behaviour of creating the entry by
+ * bypassing aufs.
+ * unhash it in order to force an error in the
+ * succeeding create operation.
+ * we should not set S_DEAD here.
+ */
+ d_drop(dentry);
+ /* au_di_swap(tmp, dinfo); */
+ au_dbg_verify_dinode(dentry);
+ }
+ } else {
+ AuDbg("positive originally\n");
+ /* inode may be NULL */
+ AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode);
+ if (!tmp_h.inode) {
+ AuDbg("positive --> negative\n");
+ /* or bypassing aufs */
+ au_hide(dentry);
+ if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_btop)
+ dinfo->di_bwh = tmp->di_bwh;
+ if (inode)
+ err = au_refresh_hinode_self(inode);
+ au_dbg_verify_dinode(dentry);
+ } else if (orig_h.mode == tmp_h.mode) {
+ AuDbg("positive --> positive, same type\n");
+ if (!S_ISDIR(orig_h.mode)
+ && dinfo->di_btop > tmp->di_btop) {
+ /*
+ * similar to the behaviour of removing and
+ * creating.
+ */
+ au_hide(dentry);
+ if (inode)
+ err = au_refresh_hinode_self(inode);
+ au_dbg_verify_dinode(dentry);
+ } else {
+ /* fill empty slots */
+ if (dinfo->di_btop > tmp->di_btop)
+ dinfo->di_btop = tmp->di_btop;
+ if (dinfo->di_bbot < tmp->di_bbot)
+ dinfo->di_bbot = tmp->di_bbot;
+ dinfo->di_bwh = tmp->di_bwh;
+ dinfo->di_bdiropq = tmp->di_bdiropq;
+ bbot = dinfo->di_bbot;
+ bindex = tmp->di_btop;
+ hd = au_hdentry(tmp, bindex);
+ for (; bindex <= bbot; bindex++, hd++) {
+ if (au_h_dptr(dentry, bindex))
+ continue;
+ h_dentry = hd->hd_dentry;
+ if (!h_dentry)
+ continue;
+ AuDebugOn(d_is_negative(h_dentry));
+ h_inode = d_inode(h_dentry);
+ AuDebugOn(orig_h.mode
+ != (h_inode->i_mode
+ & S_IFMT));
+ au_set_h_dptr(dentry, bindex,
+ dget(h_dentry));
+ }
+ if (inode)
+ err = au_refresh_hinode(inode, dentry);
+ au_dbg_verify_dinode(dentry);
+ }
+ } else {
+ AuDbg("positive --> positive, different type\n");
+ /* similar to the behaviour of removing and creating */
+ au_hide(dentry);
+ if (inode)
+ err = au_refresh_hinode_self(inode);
+ au_dbg_verify_dinode(dentry);
+ }
+ }
+
+out:
+ return err;
+}
+
+void au_refresh_dop(struct dentry *dentry, int force_reval)
+{
+ const struct dentry_operations *dop
+ = force_reval ? &aufs_dop : dentry->d_sb->s_d_op;
+ static const unsigned int mask
+ = DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE;
+
+ BUILD_BUG_ON(sizeof(mask) != sizeof(dentry->d_flags));
+
+ if (dentry->d_op == dop)
+ return;
+
+ AuDbg("%pd\n", dentry);
+ spin_lock(&dentry->d_lock);
+ if (dop == &aufs_dop)
+ dentry->d_flags |= mask;
+ else
+ dentry->d_flags &= ~mask;
+ dentry->d_op = dop;
+ spin_unlock(&dentry->d_lock);
+}
+
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
+{
+ int err, ebrange, nbr;
+ unsigned int sigen;
+ struct au_dinfo *dinfo, *tmp;
+ struct super_block *sb;
+ struct inode *inode;
+
+ DiMustWriteLock(dentry);
+ AuDebugOn(IS_ROOT(dentry));
+ AuDebugOn(d_really_is_negative(parent));
+
+ sb = dentry->d_sb;
+ sigen = au_sigen(sb);
+ err = au_digen_test(parent, sigen);
+ if (unlikely(err))
+ goto out;
+
+ nbr = au_sbbot(sb) + 1;
+ dinfo = au_di(dentry);
+ err = au_di_realloc(dinfo, nbr, /*may_shrink*/0);
+ if (unlikely(err))
+ goto out;
+ ebrange = au_dbrange_test(dentry);
+ if (!ebrange)
+ ebrange = au_do_refresh_hdentry(dentry, parent);
+
+ if (d_unhashed(dentry) || ebrange /* || dinfo->di_tmpfile */) {
+ AuDebugOn(au_dbtop(dentry) < 0 && au_dbbot(dentry) >= 0);
+ if (d_really_is_positive(dentry)) {
+ inode = d_inode(dentry);
+ err = au_refresh_hinode_self(inode);
+ }
+ au_dbg_verify_dinode(dentry);
+ if (!err)
+ goto out_dgen; /* success */
+ goto out;
+ }
+
+ /* temporary dinfo */
+ AuDbgDentry(dentry);
+ err = -ENOMEM;
+ tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+ if (unlikely(!tmp))
+ goto out;
+ au_di_swap(tmp, dinfo);
+ /* returns the number of positive dentries */
+ /*
+ * if current working dir is removed, it returns an error.
+ * but the dentry itself is still valid.
+ */
+ err = au_lkup_dentry(dentry, /*btop*/0, AuLkup_ALLOW_NEG);
+ AuDbgDentry(dentry);
+ au_di_swap(tmp, dinfo);
+ if (err == -ENOENT)
+ err = 0;
+ if (err >= 0) {
+ /* compare/refresh by dinfo */
+ AuDbgDentry(dentry);
+ err = au_refresh_by_dinfo(dentry, dinfo, tmp);
+ au_dbg_verify_dinode(dentry);
+ AuTraceErr(err);
+ }
+ au_di_realloc(dinfo, nbr, /*may_shrink*/1); /* harmless if err */
+ au_rw_write_unlock(&tmp->di_rwsem);
+ au_di_free(tmp);
+ if (unlikely(err))
+ goto out;
+
+out_dgen:
+ au_update_digen(dentry);
+out:
+ if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) {
+ AuIOErr("failed refreshing %pd, %d\n", dentry, err);
+ AuDbgDentry(dentry);
+ }
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_do_h_d_reval(struct dentry *h_dentry, unsigned int flags,
+ struct dentry *dentry, aufs_bindex_t bindex)
+{
+ int err, valid;
+
+ err = 0;
+ if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE))
+ goto out;
+
+ AuDbg("b%d\n", bindex);
+ /*
+ * gave up supporting LOOKUP_CREATE/OPEN for lower fs,
+ * due to whiteout and branch permission.
+ */
+ flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE
+ | LOOKUP_FOLLOW | LOOKUP_EXCL);
+ /* it may return tri-state */
+ valid = h_dentry->d_op->d_revalidate(h_dentry, flags);
+
+ if (unlikely(valid < 0))
+ err = valid;
+ else if (!valid)
+ err = -EINVAL;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* todo: remove this */
+static int h_d_revalidate(struct dentry *dentry, struct inode *inode,
+ unsigned int flags, int do_udba, int dirren)
+{
+ int err;
+ umode_t mode, h_mode;
+ aufs_bindex_t bindex, btail, btop, ibs, ibe;
+ unsigned char plus, unhashed, is_root, h_plus, h_nfs, tmpfile;
+ struct inode *h_inode, *h_cached_inode;
+ struct dentry *h_dentry;
+ struct qstr *name, *h_name;
+
+ err = 0;
+ plus = 0;
+ mode = 0;
+ ibs = -1;
+ ibe = -1;
+ unhashed = !!d_unhashed(dentry);
+ is_root = !!IS_ROOT(dentry);
+ name = &dentry->d_name;
+ tmpfile = au_di(dentry)->di_tmpfile;
+
+ /*
+ * Theoretically, REVAL test should be unnecessary in case of
+ * {FS,I}NOTIFY.
+ * But {fs,i}notify doesn't fire some necessary events,
+ * e.g. IN_ATTRIB for atime/nlink/pageio.
+ * Let's do the REVAL test too.
+ */
+ if (do_udba && inode) {
+ mode = (inode->i_mode & S_IFMT);
+ plus = (inode->i_nlink > 0);
+ ibs = au_ibtop(inode);
+ ibe = au_ibbot(inode);
+ }
+
+ btop = au_dbtop(dentry);
+ btail = btop;
+ if (inode && S_ISDIR(inode->i_mode))
+ btail = au_dbtaildir(dentry);
+ for (bindex = btop; bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+
+ AuDbg("b%d, %pd\n", bindex, h_dentry);
+ h_nfs = !!au_test_nfs(h_dentry->d_sb);
+ spin_lock(&h_dentry->d_lock);
+ h_name = &h_dentry->d_name;
+ if (unlikely(do_udba
+ && !is_root
+ && ((!h_nfs
+ && (unhashed != !!d_unhashed(h_dentry)
+ || (!tmpfile && !dirren
+ && !au_qstreq(name, h_name))
+ ))
+ || (h_nfs
+ && !(flags & LOOKUP_OPEN)
+ && (h_dentry->d_flags
+ & DCACHE_NFSFS_RENAMED)))
+ )) {
+ int h_unhashed;
+
+ h_unhashed = d_unhashed(h_dentry);
+ spin_unlock(&h_dentry->d_lock);
+ AuDbg("unhash 0x%x 0x%x, %pd %pd\n",
+ unhashed, h_unhashed, dentry, h_dentry);
+ goto err;
+ }
+ spin_unlock(&h_dentry->d_lock);
+
+ err = au_do_h_d_reval(h_dentry, flags, dentry, bindex);
+ if (unlikely(err))
+ /* do not goto err, to keep the errno */
+ break;
+
+ /* todo: plink too? */
+ if (!do_udba)
+ continue;
+
+ /* UDBA tests */
+ if (unlikely(!!inode != d_is_positive(h_dentry)))
+ goto err;
+
+ h_inode = NULL;
+ if (d_is_positive(h_dentry))
+ h_inode = d_inode(h_dentry);
+ h_plus = plus;
+ h_mode = mode;
+ h_cached_inode = h_inode;
+ if (h_inode) {
+ h_mode = (h_inode->i_mode & S_IFMT);
+ h_plus = (h_inode->i_nlink > 0);
+ }
+ if (inode && ibs <= bindex && bindex <= ibe)
+ h_cached_inode = au_h_iptr(inode, bindex);
+
+ if (!h_nfs) {
+ if (unlikely(plus != h_plus && !tmpfile))
+ goto err;
+ } else {
+ if (unlikely(!(h_dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ && !is_root
+ && !IS_ROOT(h_dentry)
+ && unhashed != d_unhashed(h_dentry)))
+ goto err;
+ }
+ if (unlikely(mode != h_mode
+ || h_cached_inode != h_inode))
+ goto err;
+ continue;
+
+err:
+ err = -EINVAL;
+ break;
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+/* todo: consolidate with do_refresh() and au_reval_for_attr() */
+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+ struct dentry *parent;
+
+ if (!au_digen_test(dentry, sigen))
+ return 0;
+
+ parent = dget_parent(dentry);
+ di_read_lock_parent(parent, AuLock_IR);
+ AuDebugOn(au_digen_test(parent, sigen));
+ au_dbg_verify_gen(parent, sigen);
+ err = au_refresh_dentry(dentry, parent);
+ di_read_unlock(parent, AuLock_IR);
+ dput(parent);
+ AuTraceErr(err);
+ return err;
+}
+
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+ struct dentry *d, *parent;
+
+ if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR))
+ return simple_reval_dpath(dentry, sigen);
+
+ /* slow loop, keep it simple and stupid */
+ /* cf: au_cpup_dirs() */
+ err = 0;
+ parent = NULL;
+ while (au_digen_test(dentry, sigen)) {
+ d = dentry;
+ while (1) {
+ dput(parent);
+ parent = dget_parent(d);
+ if (!au_digen_test(parent, sigen))
+ break;
+ d = parent;
+ }
+
+ if (d != dentry)
+ di_write_lock_child2(d);
+
+ /* someone might update our dentry while we were sleeping */
+ if (au_digen_test(d, sigen)) {
+ /*
+ * todo: consolidate with simple_reval_dpath(),
+ * do_refresh() and au_reval_for_attr().
+ */
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_refresh_dentry(d, parent);
+ di_read_unlock(parent, AuLock_IR);
+ }
+
+ if (d != dentry)
+ di_write_unlock(d);
+ dput(parent);
+ if (unlikely(err))
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * returns 1 if valid, otherwise 0 (or a negative error such as -ECHILD).
+ */
+static int aufs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ int valid, err;
+ unsigned int sigen;
+ unsigned char do_udba, dirren;
+ struct super_block *sb;
+ struct inode *inode;
+
+ /* todo: support rcu-walk? */
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ valid = 0;
+ if (unlikely(!au_di(dentry)))
+ goto out;
+
+ valid = 1;
+ sb = dentry->d_sb;
+ /*
+ * todo: very ugly
+ * i_mutex of parent dir may be held,
+ * but we should not return 'invalid' due to busy.
+ */
+ err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM);
+ if (unlikely(err)) {
+ valid = err;
+ AuTraceErr(err);
+ goto out;
+ }
+ inode = NULL;
+ if (d_really_is_positive(dentry))
+ inode = d_inode(dentry);
+ if (unlikely(inode && au_is_bad_inode(inode))) {
+ err = -EINVAL;
+ AuTraceErr(err);
+ goto out_dgrade;
+ }
+ if (unlikely(au_dbrange_test(dentry))) {
+ err = -EINVAL;
+ AuTraceErr(err);
+ goto out_dgrade;
+ }
+
+ sigen = au_sigen(sb);
+ if (au_digen_test(dentry, sigen)) {
+ AuDebugOn(IS_ROOT(dentry));
+ err = au_reval_dpath(dentry, sigen);
+ if (unlikely(err)) {
+ AuTraceErr(err);
+ goto out_dgrade;
+ }
+ }
+ di_downgrade_lock(dentry, AuLock_IR);
+
+ err = -EINVAL;
+ if (!(flags & (LOOKUP_OPEN | LOOKUP_EMPTY))
+ && inode
+ && !(inode->i_state & I_LINKABLE)
+ && (IS_DEADDIR(inode) || !inode->i_nlink)) {
+ AuTraceErr(err);
+ goto out_inval;
+ }
+
+ do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE);
+ if (do_udba && inode) {
+ aufs_bindex_t btop = au_ibtop(inode);
+ struct inode *h_inode;
+
+ if (btop >= 0) {
+ h_inode = au_h_iptr(inode, btop);
+ if (h_inode && au_test_higen(inode, h_inode)) {
+ AuTraceErr(err);
+ goto out_inval;
+ }
+ }
+ }
+
+ dirren = !!au_opt_test(au_mntflags(sb), DIRREN);
+ err = h_d_revalidate(dentry, inode, flags, do_udba, dirren);
+ if (unlikely(!err && do_udba && au_dbtop(dentry) < 0)) {
+ err = -EIO;
+ AuDbg("both of real entry and whiteout found, %p, err %d\n",
+ dentry, err);
+ }
+ goto out_inval;
+
+out_dgrade:
+ di_downgrade_lock(dentry, AuLock_IR);
+out_inval:
+ aufs_read_unlock(dentry, AuLock_IR);
+ AuTraceErr(err);
+ valid = !err;
+out:
+ if (!valid) {
+ AuDbg("%pd invalid, %d\n", dentry, valid);
+ d_drop(dentry);
+ }
+ return valid;
+}
+
+static void aufs_d_release(struct dentry *dentry)
+{
+ if (au_di(dentry)) {
+ au_di_fin(dentry);
+ au_hn_di_reinit(dentry);
+ }
+}
+
+const struct dentry_operations aufs_dop = {
+ .d_revalidate = aufs_d_revalidate,
+ .d_weak_revalidate = aufs_d_revalidate,
+ .d_release = aufs_d_release
+};
+
+/* aufs_dop without d_revalidate */
+const struct dentry_operations aufs_dop_noreval = {
+ .d_release = aufs_d_release
+};
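
For callers, the contract of au_lkup_dentry() is the one stated in its comment: a negative value is a plain errno, zero means only negatives or whiteouts were found, and a positive value is the number of lower positive dentries. A minimal caller-side sketch (the di/si locking taken by the real callers is elided; example_lookup() is hypothetical and not part of aufs):

static int example_lookup(struct dentry *dentry)
{
	int npositive;

	npositive = au_lkup_dentry(dentry, /*btop*/0, AuLkup_ALLOW_NEG);
	if (npositive < 0)
		return npositive;	/* plain -errno */
	if (!npositive)
		return -ENOENT;	/* nothing positive; a real caller decides */
	return 0;		/* at least one lower positive dentry */
}
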
diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h
new file mode 100644
index 000000000000..3b443fc7eef0
--- /dev/null
+++ b/fs/aufs/dentry.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#ifndef __AUFS_DENTRY_H__
+#define __AUFS_DENTRY_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include "dirren.h"
+#include "rwsem.h"
+
+struct au_hdentry {
+ struct dentry *hd_dentry;
+ aufs_bindex_t hd_id;
+};
+
+struct au_dinfo {
+ atomic_t di_generation;
+
+ struct au_rwsem di_rwsem;
+ aufs_bindex_t di_btop, di_bbot, di_bwh, di_bdiropq;
+ unsigned char di_tmpfile; /* to allow the different name */
+ struct au_hdentry *di_hdentry;
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* flags for au_lkup_dentry() */
+#define AuLkup_ALLOW_NEG 1
+#define AuLkup_IGNORE_PERM (1 << 1)
+#define AuLkup_DIRREN (1 << 2)
+#define au_ftest_lkup(flags, name) ((flags) & AuLkup_##name)
+#define au_fset_lkup(flags, name) \
+ do { (flags) |= AuLkup_##name; } while (0)
+#define au_fclr_lkup(flags, name) \
+ do { (flags) &= ~AuLkup_##name; } while (0)
+
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuLkup_DIRREN
+#define AuLkup_DIRREN 0
+#endif
+
+struct au_do_lookup_args {
+ unsigned int flags;
+ mode_t type;
+ struct qstr whname, *name;
+ struct au_dr_lookup dirren;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dentry.c */
+extern const struct dentry_operations aufs_dop, aufs_dop_noreval;
+struct au_branch;
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent);
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+ struct dentry *h_parent, struct au_branch *br);
+
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t btop,
+ unsigned int flags);
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh);
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent);
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen);
+void au_refresh_dop(struct dentry *dentry, int force_reval);
+
+/* dinfo.c */
+void au_di_init_once(void *_di);
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc);
+void au_di_free(struct au_dinfo *dinfo);
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b);
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src);
+int au_di_init(struct dentry *dentry);
+void au_di_fin(struct dentry *dentry);
+int au_di_realloc(struct au_dinfo *dinfo, int nbr, int may_shrink);
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
+void di_read_unlock(struct dentry *d, int flags);
+void di_downgrade_lock(struct dentry *d, int flags);
+void di_write_lock(struct dentry *d, unsigned int lsc);
+void di_write_unlock(struct dentry *d);
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex);
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex);
+aufs_bindex_t au_dbtail(struct dentry *dentry);
+aufs_bindex_t au_dbtaildir(struct dentry *dentry);
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_dentry);
+int au_digen_test(struct dentry *dentry, unsigned int sigen);
+int au_dbrange_test(struct dentry *dentry);
+void au_update_digen(struct dentry *dentry);
+void au_update_dbrange(struct dentry *dentry, int do_put_zero);
+void au_update_dbtop(struct dentry *dentry);
+void au_update_dbbot(struct dentry *dentry);
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_dinfo *au_di(struct dentry *dentry)
+{
+ return dentry->d_fsdata;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for dinfo */
+enum {
+ AuLsc_DI_CHILD, /* child first */
+ AuLsc_DI_CHILD2, /* rename(2), link(2), and cpup at hnotify */
+ AuLsc_DI_CHILD3, /* copyup dirs */
+ AuLsc_DI_PARENT,
+ AuLsc_DI_PARENT2,
+ AuLsc_DI_PARENT3,
+ AuLsc_DI_TMP /* temp for replacing dinfo */
+};
+
+/*
+ * di_read_lock_child, di_write_lock_child,
+ * di_read_lock_child2, di_write_lock_child2,
+ * di_read_lock_child3, di_write_lock_child3,
+ * di_read_lock_parent, di_write_lock_parent,
+ * di_read_lock_parent2, di_write_lock_parent2,
+ * di_read_lock_parent3, di_write_lock_parent3,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void di_read_lock_##name(struct dentry *d, int flags) \
+{ di_read_lock(d, flags, AuLsc_DI_##lsc); }
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void di_write_lock_##name(struct dentry *d) \
+{ di_write_lock(d, AuLsc_DI_##lsc); }
+
+#define AuRWLockFuncs(name, lsc) \
+ AuReadLockFunc(name, lsc) \
+ AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define DiMustNoWaiters(d) AuRwMustNoWaiters(&au_di(d)->di_rwsem)
+#define DiMustAnyLock(d) AuRwMustAnyLock(&au_di(d)->di_rwsem)
+#define DiMustWriteLock(d) AuRwMustWriteLock(&au_di(d)->di_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: memory barrier? */
+static inline unsigned int au_digen(struct dentry *d)
+{
+ return atomic_read(&au_di(d)->di_generation);
+}
+
+static inline void au_h_dentry_init(struct au_hdentry *hdentry)
+{
+ hdentry->hd_dentry = NULL;
+}
+
+static inline struct au_hdentry *au_hdentry(struct au_dinfo *di,
+ aufs_bindex_t bindex)
+{
+ return di->di_hdentry + bindex;
+}
+
+static inline void au_hdput(struct au_hdentry *hd)
+{
+ if (hd)
+ dput(hd->hd_dentry);
+}
+
+static inline aufs_bindex_t au_dbtop(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_btop;
+}
+
+static inline aufs_bindex_t au_dbbot(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_bbot;
+}
+
+static inline aufs_bindex_t au_dbwh(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_bwh;
+}
+
+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_bdiropq;
+}
+
+/* todo: hard/soft set? */
+static inline void au_set_dbtop(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ au_di(dentry)->di_btop = bindex;
+}
+
+static inline void au_set_dbbot(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ au_di(dentry)->di_bbot = bindex;
+}
+
+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ /* dbwh can be outside of btop - bbot range */
+ au_di(dentry)->di_bwh = bindex;
+}
+
+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ au_di(dentry)->di_bdiropq = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_HNOTIFY
+static inline void au_digen_dec(struct dentry *d)
+{
+ atomic_dec(&au_di(d)->di_generation);
+}
+
+static inline void au_hn_di_reinit(struct dentry *dentry)
+{
+ dentry->d_fsdata = NULL;
+}
+#else
+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DENTRY_H__ */
diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c
new file mode 100644
index 000000000000..86140548f046
--- /dev/null
+++ b/fs/aufs/dinfo.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * dentry private data
+ */
+
+#include "aufs.h"
+
+void au_di_init_once(void *_dinfo)
+{
+ struct au_dinfo *dinfo = _dinfo;
+
+ au_rw_init(&dinfo->di_rwsem);
+}
+
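+/* allocate a dinfo sized for the current branches; all indices start at -1 */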
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc)
+{
+ struct au_dinfo *dinfo;
+ int nbr, i;
+
+ dinfo = au_cache_alloc_dinfo();
+ if (unlikely(!dinfo))
+ goto out;
+
+ nbr = au_sbbot(sb) + 1;
+ if (nbr <= 0)
+ nbr = 1;
+ dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS);
+ if (dinfo->di_hdentry) {
+ au_rw_write_lock_nested(&dinfo->di_rwsem, lsc);
+ dinfo->di_btop = -1;
+ dinfo->di_bbot = -1;
+ dinfo->di_bwh = -1;
+ dinfo->di_bdiropq = -1;
+ dinfo->di_tmpfile = 0;
+ for (i = 0; i < nbr; i++)
+ dinfo->di_hdentry[i].hd_id = -1;
+ goto out;
+ }
+
+ au_cache_free_dinfo(dinfo);
+ dinfo = NULL;
+
+out:
+ return dinfo;
+}
+
+void au_di_free(struct au_dinfo *dinfo)
+{
+ struct au_hdentry *p;
+ aufs_bindex_t bbot, bindex;
+
+ /* dentry may not be revalidated */
+ bindex = dinfo->di_btop;
+ if (bindex >= 0) {
+ bbot = dinfo->di_bbot;
+ p = au_hdentry(dinfo, bindex);
+ while (bindex++ <= bbot)
+ au_hdput(p++);
+ }
+ au_kfree_try_rcu(dinfo->di_hdentry);
+ au_cache_free_dinfo(dinfo);
+}
+
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b)
+{
+ struct au_hdentry *p;
+ aufs_bindex_t bi;
+
+ AuRwMustWriteLock(&a->di_rwsem);
+ AuRwMustWriteLock(&b->di_rwsem);
+
+#define DiSwap(v, name) \
+ do { \
+ v = a->di_##name; \
+ a->di_##name = b->di_##name; \
+ b->di_##name = v; \
+ } while (0)
+
+ DiSwap(p, hdentry);
+ DiSwap(bi, btop);
+ DiSwap(bi, bbot);
+ DiSwap(bi, bwh);
+ DiSwap(bi, bdiropq);
+ /* smp_mb(); */
+
+#undef DiSwap
+}
+
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src)
+{
+ AuRwMustWriteLock(&dst->di_rwsem);
+ AuRwMustWriteLock(&src->di_rwsem);
+
+ dst->di_btop = src->di_btop;
+ dst->di_bbot = src->di_bbot;
+ dst->di_bwh = src->di_bwh;
+ dst->di_bdiropq = src->di_bdiropq;
+ /* smp_mb(); */
+}
+
+int au_di_init(struct dentry *dentry)
+{
+ int err;
+ struct super_block *sb;
+ struct au_dinfo *dinfo;
+
+ err = 0;
+ sb = dentry->d_sb;
+ dinfo = au_di_alloc(sb, AuLsc_DI_CHILD);
+ if (dinfo) {
+ atomic_set(&dinfo->di_generation, au_sigen(sb));
+ /* smp_mb(); */ /* atomic_set */
+ dentry->d_fsdata = dinfo;
+ } else
+ err = -ENOMEM;
+
+ return err;
+}
+
+void au_di_fin(struct dentry *dentry)
+{
+ struct au_dinfo *dinfo;
+
+ dinfo = au_di(dentry);
+ AuRwDestroy(&dinfo->di_rwsem);
+ au_di_free(dinfo);
+}
+
+int au_di_realloc(struct au_dinfo *dinfo, int nbr, int may_shrink)
+{
+ int err, sz;
+ struct au_hdentry *hdp;
+
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ err = -ENOMEM;
+ sz = sizeof(*hdp) * (dinfo->di_bbot + 1);
+ if (!sz)
+ sz = sizeof(*hdp);
+ hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS,
+ may_shrink);
+ if (hdp) {
+ dinfo->di_hdentry = hdp;
+ err = 0;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void do_ii_write_lock(struct inode *inode, unsigned int lsc)
+{
+ switch (lsc) {
+ case AuLsc_DI_CHILD:
+ ii_write_lock_child(inode);
+ break;
+ case AuLsc_DI_CHILD2:
+ ii_write_lock_child2(inode);
+ break;
+ case AuLsc_DI_CHILD3:
+ ii_write_lock_child3(inode);
+ break;
+ case AuLsc_DI_PARENT:
+ ii_write_lock_parent(inode);
+ break;
+ case AuLsc_DI_PARENT2:
+ ii_write_lock_parent2(inode);
+ break;
+ case AuLsc_DI_PARENT3:
+ ii_write_lock_parent3(inode);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void do_ii_read_lock(struct inode *inode, unsigned int lsc)
+{
+ switch (lsc) {
+ case AuLsc_DI_CHILD:
+ ii_read_lock_child(inode);
+ break;
+ case AuLsc_DI_CHILD2:
+ ii_read_lock_child2(inode);
+ break;
+ case AuLsc_DI_CHILD3:
+ ii_read_lock_child3(inode);
+ break;
+ case AuLsc_DI_PARENT:
+ ii_read_lock_parent(inode);
+ break;
+ case AuLsc_DI_PARENT2:
+ ii_read_lock_parent2(inode);
+ break;
+ case AuLsc_DI_PARENT3:
+ ii_read_lock_parent3(inode);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc)
+{
+ struct inode *inode;
+
+ au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc);
+ if (d_really_is_positive(d)) {
+ inode = d_inode(d);
+ if (au_ftest_lock(flags, IW))
+ do_ii_write_lock(inode, lsc);
+ else if (au_ftest_lock(flags, IR))
+ do_ii_read_lock(inode, lsc);
+ }
+}
+
+void di_read_unlock(struct dentry *d, int flags)
+{
+ struct inode *inode;
+
+ if (d_really_is_positive(d)) {
+ inode = d_inode(d);
+ if (au_ftest_lock(flags, IW)) {
+ au_dbg_verify_dinode(d);
+ ii_write_unlock(inode);
+ } else if (au_ftest_lock(flags, IR)) {
+ au_dbg_verify_dinode(d);
+ ii_read_unlock(inode);
+ }
+ }
+ au_rw_read_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_downgrade_lock(struct dentry *d, int flags)
+{
+ if (d_really_is_positive(d) && au_ftest_lock(flags, IR))
+ ii_downgrade_lock(d_inode(d));
+ au_rw_dgrade_lock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock(struct dentry *d, unsigned int lsc)
+{
+ au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc);
+ if (d_really_is_positive(d))
+ do_ii_write_lock(d_inode(d), lsc);
+}
+
+void di_write_unlock(struct dentry *d)
+{
+ au_dbg_verify_dinode(d);
+ if (d_really_is_positive(d))
+ ii_write_unlock(d_inode(d));
+ au_rw_write_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir)
+{
+ AuDebugOn(d1 == d2
+ || d_inode(d1) == d_inode(d2)
+ || d1->d_sb != d2->d_sb);
+
+ if ((isdir && au_test_subdir(d1, d2))
+ || d1 < d2) {
+ di_write_lock_child(d1);
+ di_write_lock_child2(d2);
+ } else {
+ di_write_lock_child(d2);
+ di_write_lock_child2(d1);
+ }
+}
+
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir)
+{
+ AuDebugOn(d1 == d2
+ || d_inode(d1) == d_inode(d2)
+ || d1->d_sb != d2->d_sb);
+
+ if ((isdir && au_test_subdir(d1, d2))
+ || d1 < d2) {
+ di_write_lock_parent(d1);
+ di_write_lock_parent2(d2);
+ } else {
+ di_write_lock_parent(d2);
+ di_write_lock_parent2(d1);
+ }
+}
+
+void di_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+ di_write_unlock(d1);
+ if (d_inode(d1) == d_inode(d2))
+ au_rw_write_unlock(&au_di(d2)->di_rwsem);
+ else
+ di_write_unlock(d2);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ struct dentry *d;
+
+ DiMustAnyLock(dentry);
+
+ if (au_dbtop(dentry) < 0 || bindex < au_dbtop(dentry))
+ return NULL;
+ AuDebugOn(bindex < 0);
+ d = au_hdentry(au_di(dentry), bindex)->hd_dentry;
+ AuDebugOn(d && au_dcount(d) <= 0);
+ return d;
+}
+
+/*
+ * extended version of au_h_dptr().
+ * returns a hashed and positive (or linkable) h_dentry at @bindex, NULL, or
+ * an error pointer.
+ */
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ struct dentry *h_dentry;
+ struct inode *inode, *h_inode;
+
+ AuDebugOn(d_really_is_negative(dentry));
+
+ h_dentry = NULL;
+ if (au_dbtop(dentry) <= bindex
+ && bindex <= au_dbbot(dentry))
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && !au_d_linkable(h_dentry)) {
+ dget(h_dentry);
+ goto out; /* success */
+ }
+
+ inode = d_inode(dentry);
+ AuDebugOn(bindex < au_ibtop(inode));
+ AuDebugOn(au_ibbot(inode) < bindex);
+ h_inode = au_h_iptr(inode, bindex);
+ h_dentry = d_find_alias(h_inode);
+ if (h_dentry) {
+ if (!IS_ERR(h_dentry)) {
+ if (!au_d_linkable(h_dentry))
+ goto out; /* success */
+ dput(h_dentry);
+ } else
+ goto out;
+ }
+
+ if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) {
+ h_dentry = au_plink_lkup(inode, bindex);
+ AuDebugOn(!h_dentry);
+ if (!IS_ERR(h_dentry)) {
+ if (!au_d_hashed_positive(h_dentry))
+ goto out; /* success */
+ dput(h_dentry);
+ h_dentry = NULL;
+ }
+ }
+
+out:
+ AuDbgDentry(h_dentry);
+ return h_dentry;
+}
+
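+/*
+ * au_dbtail()/au_dbtaildir(): the bottom limit of the branches worth
+ * consulting for this dentry; a whiteout, or an opaque-dir mark for
+ * directories, cuts the range short.
+ */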
+aufs_bindex_t au_dbtail(struct dentry *dentry)
+{
+ aufs_bindex_t bbot, bwh;
+
+ bbot = au_dbbot(dentry);
+ if (0 <= bbot) {
+ bwh = au_dbwh(dentry);
+ if (!bwh)
+ return bwh;
+ if (0 < bwh && bwh < bbot)
+ return bwh - 1;
+ }
+ return bbot;
+}
+
+aufs_bindex_t au_dbtaildir(struct dentry *dentry)
+{
+ aufs_bindex_t bbot, bopq;
+
+ bbot = au_dbtail(dentry);
+ if (0 <= bbot) {
+ bopq = au_dbdiropq(dentry);
+ if (0 <= bopq && bopq < bbot)
+ bbot = bopq;
+ }
+ return bbot;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_dentry)
+{
+ struct au_dinfo *dinfo;
+ struct au_hdentry *hd;
+ struct au_branch *br;
+
+ DiMustWriteLock(dentry);
+
+ dinfo = au_di(dentry);
+ hd = au_hdentry(dinfo, bindex);
+ au_hdput(hd);
+ hd->hd_dentry = h_dentry;
+ if (h_dentry) {
+ br = au_sbr(dentry->d_sb, bindex);
+ hd->hd_id = br->br_id;
+ }
+}
+
+int au_dbrange_test(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t btop, bbot;
+
+ err = 0;
+ btop = au_dbtop(dentry);
+ bbot = au_dbbot(dentry);
+ if (btop >= 0)
+ AuDebugOn(bbot < 0 && btop > bbot);
+ else {
+ err = -EIO;
+ AuDebugOn(bbot >= 0);
+ }
+
+ return err;
+}
+
+int au_digen_test(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+
+ err = 0;
+ if (unlikely(au_digen(dentry) != sigen
+ || au_iigen_test(d_inode(dentry), sigen)))
+ err = -EIO;
+
+ return err;
+}
+
+void au_update_digen(struct dentry *dentry)
+{
+ atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb));
+ /* smp_mb(); */ /* atomic_set */
+}
+
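+/*
+ * re-compute di_btop/di_bbot so that they span exactly the branches which
+ * hold a lower dentry; when @do_put_zero is set, negative lower dentries are
+ * dropped first.
+ */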
+void au_update_dbrange(struct dentry *dentry, int do_put_zero)
+{
+ struct au_dinfo *dinfo;
+ struct dentry *h_d;
+ struct au_hdentry *hdp;
+ aufs_bindex_t bindex, bbot;
+
+ DiMustWriteLock(dentry);
+
+ dinfo = au_di(dentry);
+ if (!dinfo || dinfo->di_btop < 0)
+ return;
+
+ if (do_put_zero) {
+ bbot = dinfo->di_bbot;
+ bindex = dinfo->di_btop;
+ hdp = au_hdentry(dinfo, bindex);
+ for (; bindex <= bbot; bindex++, hdp++) {
+ h_d = hdp->hd_dentry;
+ if (h_d && d_is_negative(h_d))
+ au_set_h_dptr(dentry, bindex, NULL);
+ }
+ }
+
+ dinfo->di_btop = 0;
+ hdp = au_hdentry(dinfo, dinfo->di_btop);
+ for (; dinfo->di_btop <= dinfo->di_bbot; dinfo->di_btop++, hdp++)
+ if (hdp->hd_dentry)
+ break;
+ if (dinfo->di_btop > dinfo->di_bbot) {
+ dinfo->di_btop = -1;
+ dinfo->di_bbot = -1;
+ return;
+ }
+
+ hdp = au_hdentry(dinfo, dinfo->di_bbot);
+ for (; dinfo->di_bbot >= 0; dinfo->di_bbot--, hdp--)
+ if (hdp->hd_dentry)
+ break;
+ AuDebugOn(dinfo->di_btop > dinfo->di_bbot || dinfo->di_bbot < 0);
+}
+
+void au_update_dbtop(struct dentry *dentry)
+{
+ aufs_bindex_t bindex, bbot;
+ struct dentry *h_dentry;
+
+ bbot = au_dbbot(dentry);
+ for (bindex = au_dbtop(dentry); bindex <= bbot; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ if (d_is_positive(h_dentry)) {
+ au_set_dbtop(dentry, bindex);
+ return;
+ }
+ au_set_h_dptr(dentry, bindex, NULL);
+ }
+}
+
+void au_update_dbbot(struct dentry *dentry)
+{
+ aufs_bindex_t bindex, btop;
+ struct dentry *h_dentry;
+
+ btop = au_dbtop(dentry);
+ for (bindex = au_dbbot(dentry); bindex >= btop; bindex--) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ if (d_is_positive(h_dentry)) {
+ au_set_dbbot(dentry, bindex);
+ return;
+ }
+ au_set_h_dptr(dentry, bindex, NULL);
+ }
+}
+
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry)
+{
+ aufs_bindex_t bindex, bbot;
+
+ bbot = au_dbbot(dentry);
+ for (bindex = au_dbtop(dentry); bindex <= bbot; bindex++)
+ if (au_h_dptr(dentry, bindex) == h_dentry)
+ return bindex;
+ return -1;
+}
diff --git a/fs/aufs/dir.c b/fs/aufs/dir.c
new file mode 100644
index 000000000000..2efd983ecad7
--- /dev/null
+++ b/fs/aufs/dir.c
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * directory operations
+ */
+
+#include <linux/fs_stack.h>
+#include "aufs.h"
+
+void au_add_nlink(struct inode *dir, struct inode *h_dir)
+{
+ unsigned int nlink;
+
+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+ nlink = dir->i_nlink;
+ nlink += h_dir->i_nlink - 2;
+ if (h_dir->i_nlink < 2)
+ nlink += 2;
+ smp_mb(); /* for i_nlink */
+ /* 0 can happen while revalidating */
+ set_nlink(dir, nlink);
+}
+
+void au_sub_nlink(struct inode *dir, struct inode *h_dir)
+{
+ unsigned int nlink;
+
+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+ nlink = dir->i_nlink;
+ nlink -= h_dir->i_nlink - 2;
+ if (h_dir->i_nlink < 2)
+ nlink -= 2;
+ smp_mb(); /* for i_nlink */
+ /* nlink == 0 means the branch-fs is broken */
+ set_nlink(dir, nlink);
+}
+
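+/*
+ * estimate the size of this directory by summing up the lower dir sizes,
+ * rounded up to a power of two; results smaller than NAME_MAX fall back to
+ * AUFS_RDBLK_DEF, and the value never exceeds KMALLOC_MAX_SIZE.
+ */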
+loff_t au_dir_size(struct file *file, struct dentry *dentry)
+{
+ loff_t sz;
+ aufs_bindex_t bindex, bbot;
+ struct file *h_file;
+ struct dentry *h_dentry;
+
+ sz = 0;
+ if (file) {
+ AuDebugOn(!d_is_dir(file->f_path.dentry));
+
+ bbot = au_fbbot_dir(file);
+ for (bindex = au_fbtop(file);
+ bindex <= bbot && sz < KMALLOC_MAX_SIZE;
+ bindex++) {
+ h_file = au_hf_dir(file, bindex);
+ if (h_file && file_inode(h_file))
+ sz += vfsub_f_size_read(h_file);
+ }
+ } else {
+ AuDebugOn(!dentry);
+ AuDebugOn(!d_is_dir(dentry));
+
+ bbot = au_dbtaildir(dentry);
+ for (bindex = au_dbtop(dentry);
+ bindex <= bbot && sz < KMALLOC_MAX_SIZE;
+ bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && d_is_positive(h_dentry))
+ sz += i_size_read(d_inode(h_dentry));
+ }
+ }
+ if (sz < KMALLOC_MAX_SIZE)
+ sz = roundup_pow_of_two(sz);
+ if (sz > KMALLOC_MAX_SIZE)
+ sz = KMALLOC_MAX_SIZE;
+ else if (sz < NAME_MAX) {
+ BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX);
+ sz = AUFS_RDBLK_DEF;
+ }
+ return sz;
+}
+
+struct au_dir_ts_arg {
+ struct dentry *dentry;
+ aufs_bindex_t brid;
+};
+
+static void au_do_dir_ts(void *arg)
+{
+ struct au_dir_ts_arg *a = arg;
+ struct au_dtime dt;
+ struct path h_path;
+ struct inode *dir, *h_dir;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_hinode *hdir;
+ int err;
+ aufs_bindex_t btop, bindex;
+
+ sb = a->dentry->d_sb;
+ if (d_really_is_negative(a->dentry))
+ goto out;
+ /* no dir->i_mutex lock */
+ aufs_read_lock(a->dentry, AuLock_DW); /* noflush */
+
+ dir = d_inode(a->dentry);
+ btop = au_ibtop(dir);
+ bindex = au_br_index(sb, a->brid);
+ if (bindex < btop)
+ goto out_unlock;
+
+ br = au_sbr(sb, bindex);
+ h_path.dentry = au_h_dptr(a->dentry, bindex);
+ if (!h_path.dentry)
+ goto out_unlock;
+ h_path.mnt = au_br_mnt(br);
+ au_dtime_store(&dt, a->dentry, &h_path);
+
+ br = au_sbr(sb, btop);
+ if (!au_br_writable(br->br_perm))
+ goto out_unlock;
+ h_path.dentry = au_h_dptr(a->dentry, btop);
+ h_path.mnt = au_br_mnt(br);
+ err = vfsub_mnt_want_write(h_path.mnt);
+ if (err)
+ goto out_unlock;
+ hdir = au_hi(dir, btop);
+ au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+ h_dir = au_h_iptr(dir, btop);
+ if (h_dir->i_nlink
+ && timespec64_compare(&h_dir->i_mtime, &dt.dt_mtime) < 0) {
+ dt.dt_h_path = h_path;
+ au_dtime_revert(&dt);
+ }
+ au_hn_inode_unlock(hdir);
+ vfsub_mnt_drop_write(h_path.mnt);
+ au_cpup_attr_timesizes(dir);
+
+out_unlock:
+ aufs_read_unlock(a->dentry, AuLock_DW);
+out:
+ dput(a->dentry);
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ au_kfree_try_rcu(arg);
+}
+
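+/*
+ * bring the timestamps of the dir on the top writable branch up to date with
+ * branch @bindex; the real work runs asynchronously in au_do_dir_ts() above
+ * via the nowait workqueue.
+ */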
+void au_dir_ts(struct inode *dir, aufs_bindex_t bindex)
+{
+ int perm, wkq_err;
+ aufs_bindex_t btop;
+ struct au_dir_ts_arg *arg;
+ struct dentry *dentry;
+ struct super_block *sb;
+
+ IMustLock(dir);
+
+ dentry = d_find_any_alias(dir);
+ AuDebugOn(!dentry);
+ sb = dentry->d_sb;
+ btop = au_ibtop(dir);
+ if (btop == bindex) {
+ au_cpup_attr_timesizes(dir);
+ goto out;
+ }
+
+ perm = au_sbr_perm(sb, btop);
+ if (!au_br_writable(perm))
+ goto out;
+
+ arg = kmalloc(sizeof(*arg), GFP_NOFS);
+ if (!arg)
+ goto out;
+
+ arg->dentry = dget(dentry); /* will be dput-ted by au_do_dir_ts() */
+ arg->brid = au_sbr_id(sb, bindex);
+ wkq_err = au_wkq_nowait(au_do_dir_ts, arg, sb, /*flags*/0);
+ if (unlikely(wkq_err)) {
+ pr_err("wkq %d\n", wkq_err);
+ dput(dentry);
+ au_kfree_try_rcu(arg);
+ }
+
+out:
+ dput(dentry);
+}
+
+/* ---------------------------------------------------------------------- */
+
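+/*
+ * passed to au_reval_and_lock_fdi() as the reopen callback: after the dentry
+ * has been revalidated, drop the lower files which fell outside the
+ * [dbtop, dbtaildir] range and open the lower dirs which are newly in range.
+ */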
+static int reopen_dir(struct file *file)
+{
+ int err;
+ unsigned int flags;
+ aufs_bindex_t bindex, btail, btop;
+ struct dentry *dentry, *h_dentry;
+ struct file *h_file;
+
+ /* open all lower dirs */
+ dentry = file->f_path.dentry;
+ btop = au_dbtop(dentry);
+ for (bindex = au_fbtop(file); bindex < btop; bindex++)
+ au_set_h_fptr(file, bindex, NULL);
+ au_set_fbtop(file, btop);
+
+ btail = au_dbtaildir(dentry);
+ for (bindex = au_fbbot_dir(file); btail < bindex; bindex--)
+ au_set_h_fptr(file, bindex, NULL);
+ au_set_fbbot_dir(file, btail);
+
+ flags = vfsub_file_flags(file);
+ for (bindex = btop; bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ h_file = au_hf_dir(file, bindex);
+ if (h_file)
+ continue;
+
+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out; /* close all? */
+ au_set_h_fptr(file, bindex, h_file);
+ }
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ err = 0;
+
+out:
+ return err;
+}
+
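+/*
+ * open the lower dir on every branch in [dbtop, dbtaildir];
+ * on failure, close everything opened so far.
+ */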
+static int do_open_dir(struct file *file, int flags, struct file *h_file)
+{
+ int err;
+ aufs_bindex_t bindex, btail;
+ struct dentry *dentry, *h_dentry;
+ struct vfsmount *mnt;
+
+ FiMustWriteLock(file);
+ AuDebugOn(h_file);
+
+ err = 0;
+ mnt = file->f_path.mnt;
+ dentry = file->f_path.dentry;
+ file->f_version = inode_query_iversion(d_inode(dentry));
+ bindex = au_dbtop(dentry);
+ au_set_fbtop(file, bindex);
+ btail = au_dbtaildir(dentry);
+ au_set_fbbot_dir(file, btail);
+ for (; !err && bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+
+ err = vfsub_test_mntns(mnt, h_dentry->d_sb);
+ if (unlikely(err))
+ break;
+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+ if (IS_ERR(h_file)) {
+ err = PTR_ERR(h_file);
+ break;
+ }
+ au_set_h_fptr(file, bindex, h_file);
+ }
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ if (!err)
+ return 0; /* success */
+
+ /* close all */
+ for (bindex = au_fbtop(file); bindex <= btail; bindex++)
+ au_set_h_fptr(file, bindex, NULL);
+ au_set_fbtop(file, -1);
+ au_set_fbbot_dir(file, -1);
+
+ return err;
+}
+
+static int aufs_open_dir(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ int err;
+ struct super_block *sb;
+ struct au_fidir *fidir;
+
+ err = -ENOMEM;
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ fidir = au_fidir_alloc(sb);
+ if (fidir) {
+ struct au_do_open_args args = {
+ .open = do_open_dir,
+ .fidir = fidir
+ };
+ err = au_do_open(file, &args);
+ if (unlikely(err))
+ au_kfree_rcu(fidir);
+ }
+ si_read_unlock(sb);
+ return err;
+}
+
+static int aufs_release_dir(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ struct au_vdir *vdir_cache;
+ struct au_finfo *finfo;
+ struct au_fidir *fidir;
+ struct au_hfile *hf;
+ aufs_bindex_t bindex, bbot;
+
+ finfo = au_fi(file);
+ fidir = finfo->fi_hdir;
+ if (fidir) {
+ au_hbl_del(&finfo->fi_hlist,
+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
+ vdir_cache = fidir->fd_vdir_cache; /* lock-free */
+ if (vdir_cache)
+ au_vdir_free(vdir_cache);
+
+ bindex = finfo->fi_btop;
+ if (bindex >= 0) {
+ hf = fidir->fd_hfile + bindex;
+ /*
+ * calls fput() instead of filp_close(),
+ * since there is no dnotify nor lock on the lower file.
+ */
+ bbot = fidir->fd_bbot;
+ for (; bindex <= bbot; bindex++, hf++)
+ if (hf->hf_file)
+ au_hfput(hf, /*execed*/0);
+ }
+ au_kfree_rcu(fidir);
+ finfo->fi_hdir = NULL;
+ }
+ au_finfo_fin(file);
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_dir(struct file *file, fl_owner_t id)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ struct file *h_file;
+
+ err = 0;
+ bbot = au_fbbot_dir(file);
+ for (bindex = au_fbtop(file); !err && bindex <= bbot; bindex++) {
+ h_file = au_hf_dir(file, bindex);
+ if (h_file)
+ err = vfsub_flush(h_file, id);
+ }
+ return err;
+}
+
+static int aufs_flush_dir(struct file *file, fl_owner_t id)
+{
+ return au_do_flush(file, id, au_do_flush_dir);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync)
+{
+ int err;
+ aufs_bindex_t bbot, bindex;
+ struct inode *inode;
+ struct super_block *sb;
+
+ err = 0;
+ sb = dentry->d_sb;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+ bbot = au_dbbot(dentry);
+ for (bindex = au_dbtop(dentry); !err && bindex <= bbot; bindex++) {
+ struct path h_path;
+
+ if (au_test_ro(sb, bindex, inode))
+ continue;
+ h_path.dentry = au_h_dptr(dentry, bindex);
+ if (!h_path.dentry)
+ continue;
+
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ err = vfsub_fsync(NULL, &h_path, datasync);
+ }
+
+ return err;
+}
+
+static int au_do_fsync_dir(struct file *file, int datasync)
+{
+ int err;
+ aufs_bindex_t bbot, bindex;
+ struct file *h_file;
+ struct super_block *sb;
+ struct inode *inode;
+
+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1, /*fi_lsc*/0);
+ if (unlikely(err))
+ goto out;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ bbot = au_fbbot_dir(file);
+ for (bindex = au_fbtop(file); !err && bindex <= bbot; bindex++) {
+ h_file = au_hf_dir(file, bindex);
+ if (!h_file || au_test_ro(sb, bindex, inode))
+ continue;
+
+ err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+ }
+
+out:
+ return err;
+}
+
+/*
+ * @file may be NULL
+ */
+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ int err;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct super_block *sb;
+
+ err = 0;
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+ inode_lock(inode);
+ sb = dentry->d_sb;
+ si_noflush_read_lock(sb);
+ if (file)
+ err = au_do_fsync_dir(file, datasync);
+ else {
+ di_write_lock_child(dentry);
+ err = au_do_fsync_dir_no_file(dentry, datasync);
+ }
+ au_cpup_attr_timesizes(inode);
+ di_write_unlock(dentry);
+ if (file)
+ fi_write_unlock(file);
+
+ si_read_unlock(sb);
+ inode_unlock(inode);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_iterate_shared(struct file *file, struct dir_context *ctx)
+{
+ int err;
+ struct dentry *dentry;
+ struct inode *inode, *h_inode;
+ struct super_block *sb;
+
+ AuDbg("%pD, ctx{%ps, %llu}\n", file, ctx->actor, ctx->pos);
+
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+
+ sb = dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1, /*fi_lsc*/0);
+ if (unlikely(err))
+ goto out;
+ err = au_alive_dir(dentry);
+ if (!err)
+ err = au_vdir_init(file);
+ di_downgrade_lock(dentry, AuLock_IR);
+ if (unlikely(err))
+ goto out_unlock;
+
+ h_inode = au_h_iptr(inode, au_ibtop(inode));
+ if (!au_test_nfsd()) {
+ err = au_vdir_fill_de(file, ctx);
+ fsstack_copy_attr_atime(inode, h_inode);
+ } else {
+ /*
+ * nfsd filldir may call lookup_one_len(), vfs_getattr(),
+ * encode_fh() and others.
+ */
+ atomic_inc(&h_inode->i_count);
+ di_read_unlock(dentry, AuLock_IR);
+ si_read_unlock(sb);
+ err = au_vdir_fill_de(file, ctx);
+ fsstack_copy_attr_atime(inode, h_inode);
+ fi_write_unlock(file);
+ iput(h_inode);
+
+ AuTraceErr(err);
+ return err;
+ }
+
+out_unlock:
+ di_read_unlock(dentry, AuLock_IR);
+ fi_write_unlock(file);
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuTestEmpty_WHONLY 1
+#define AuTestEmpty_CALLED (1 << 1)
+#define AuTestEmpty_SHWH (1 << 2)
+#define au_ftest_testempty(flags, name) ((flags) & AuTestEmpty_##name)
+#define au_fset_testempty(flags, name) \
+ do { (flags) |= AuTestEmpty_##name; } while (0)
+#define au_fclr_testempty(flags, name) \
+ do { (flags) &= ~AuTestEmpty_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuTestEmpty_SHWH
+#define AuTestEmpty_SHWH 0
+#endif
+
+struct test_empty_arg {
+ struct dir_context ctx;
+ struct au_nhash *whlist;
+ unsigned int flags;
+ int err;
+ aufs_bindex_t bindex;
+};
+
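+/*
+ * iterate_dir() callback for the emptiness test: "." and ".." are skipped,
+ * whiteout entries are collected into the whlist, and (in WHONLY mode) an
+ * ordinary entry without a matching whiteout marks the dir as non-empty.
+ */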
+static int test_empty_cb(struct dir_context *ctx, const char *__name,
+ int namelen, loff_t offset __maybe_unused, u64 ino,
+ unsigned int d_type)
+{
+ struct test_empty_arg *arg = container_of(ctx, struct test_empty_arg,
+ ctx);
+ char *name = (void *)__name;
+
+ arg->err = 0;
+ au_fset_testempty(arg->flags, CALLED);
+ /* smp_mb(); */
+ if (name[0] == '.'
+ && (namelen == 1 || (name[1] == '.' && namelen == 2)))
+ goto out; /* success */
+
+ if (namelen <= AUFS_WH_PFX_LEN
+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+ if (au_ftest_testempty(arg->flags, WHONLY)
+ && !au_nhash_test_known_wh(arg->whlist, name, namelen))
+ arg->err = -ENOTEMPTY;
+ goto out;
+ }
+
+ name += AUFS_WH_PFX_LEN;
+ namelen -= AUFS_WH_PFX_LEN;
+ if (!au_nhash_test_known_wh(arg->whlist, name, namelen))
+ arg->err = au_nhash_append_wh
+ (arg->whlist, name, namelen, ino, d_type, arg->bindex,
+ au_ftest_testempty(arg->flags, SHWH));
+
+out:
+ /* smp_mb(); */
+ AuTraceErr(arg->err);
+ return arg->err;
+}
+
+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+ int err;
+ struct file *h_file;
+ struct au_branch *br;
+
+ h_file = au_h_open(dentry, arg->bindex,
+ O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE,
+ /*file*/NULL, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = 0;
+ if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
+ && !file_inode(h_file)->i_nlink)
+ goto out_put;
+
+ do {
+ arg->err = 0;
+ au_fclr_testempty(arg->flags, CALLED);
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(h_file, &arg->ctx);
+ if (err >= 0)
+ err = arg->err;
+ } while (!err && au_ftest_testempty(arg->flags, CALLED));
+
+out_put:
+ fput(h_file);
+ br = au_sbr(dentry->d_sb, arg->bindex);
+ au_lcnt_dec(&br->br_nfiles);
+out:
+ return err;
+}
+
+struct do_test_empty_args {
+ int *errp;
+ struct dentry *dentry;
+ struct test_empty_arg *arg;
+};
+
+static void call_do_test_empty(void *args)
+{
+ struct do_test_empty_args *a = args;
+ *a->errp = do_test_empty(a->dentry, a->arg);
+}
+
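+/*
+ * run do_test_empty(); when the current credentials cannot read the lower
+ * dir, delegate the call to the aufs workqueue instead.
+ */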
+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+ int err, wkq_err;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ h_dentry = au_h_dptr(dentry, arg->bindex);
+ h_inode = d_inode(h_dentry);
+ /* todo: i_mode changes anytime? */
+ inode_lock_shared_nested(h_inode, AuLsc_I_CHILD);
+ err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ);
+ inode_unlock_shared(h_inode);
+ if (!err)
+ err = do_test_empty(dentry, arg);
+ else {
+ struct do_test_empty_args args = {
+ .errp = &err,
+ .dentry = dentry,
+ .arg = arg
+ };
+ unsigned int flags = arg->flags;
+
+ wkq_err = au_wkq_wait(call_do_test_empty, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ arg->flags = flags;
+ }
+
+ return err;
+}
+
+int au_test_empty_lower(struct dentry *dentry)
+{
+ int err;
+ unsigned int rdhash;
+ aufs_bindex_t bindex, btop, btail;
+ struct au_nhash whlist;
+ struct test_empty_arg arg = {
+ .ctx = {
+ .actor = test_empty_cb
+ }
+ };
+ int (*test_empty)(struct dentry *dentry, struct test_empty_arg *arg);
+
+ SiMustAnyLock(dentry->d_sb);
+
+ rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry));
+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+
+ arg.flags = 0;
+ arg.whlist = &whlist;
+ btop = au_dbtop(dentry);
+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+ au_fset_testempty(arg.flags, SHWH);
+ test_empty = do_test_empty;
+ if (au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1))
+ test_empty = sio_test_empty;
+ arg.bindex = btop;
+ err = test_empty(dentry, &arg);
+ if (unlikely(err))
+ goto out_whlist;
+
+ au_fset_testempty(arg.flags, WHONLY);
+ btail = au_dbtaildir(dentry);
+ for (bindex = btop + 1; !err && bindex <= btail; bindex++) {
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && d_is_positive(h_dentry)) {
+ arg.bindex = bindex;
+ err = test_empty(dentry, &arg);
+ }
+ }
+
+out_whlist:
+ au_nhash_wh_free(&whlist);
+out:
+ return err;
+}
+
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist)
+{
+ int err;
+ struct test_empty_arg arg = {
+ .ctx = {
+ .actor = test_empty_cb
+ }
+ };
+ aufs_bindex_t bindex, btail;
+
+ err = 0;
+ arg.whlist = whlist;
+ arg.flags = AuTestEmpty_WHONLY;
+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+ au_fset_testempty(arg.flags, SHWH);
+ btail = au_dbtaildir(dentry);
+ for (bindex = au_dbtop(dentry); !err && bindex <= btail; bindex++) {
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && d_is_positive(h_dentry)) {
+ arg.bindex = bindex;
+ err = sio_test_empty(dentry, &arg);
+ }
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_dir_fop = {
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = aufs_iterate_shared,
+ .unlocked_ioctl = aufs_ioctl_dir,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = aufs_compat_ioctl_dir,
+#endif
+ .open = aufs_open_dir,
+ .release = aufs_release_dir,
+ .flush = aufs_flush_dir,
+ .fsync = aufs_fsync_dir
+};
diff --git a/fs/aufs/dir.h b/fs/aufs/dir.h
new file mode 100644
index 000000000000..5c8065d258bf
--- /dev/null
+++ b/fs/aufs/dir.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * directory operations
+ */
+
+#ifndef __AUFS_DIR_H__
+#define __AUFS_DIR_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+
+/* ---------------------------------------------------------------------- */
+
+/* need to be faster and smaller */
+
+struct au_nhash {
+ unsigned int nh_num;
+ struct hlist_head *nh_head;
+};
+
+struct au_vdir_destr {
+ unsigned char len;
+ unsigned char name[0];
+} __packed;
+
+struct au_vdir_dehstr {
+ struct hlist_node hash;
+ struct au_vdir_destr *str;
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
+struct au_vdir_de {
+ ino_t de_ino;
+ unsigned char de_type;
+ /* caution: packed */
+ struct au_vdir_destr de_str;
+} __packed;
+
+struct au_vdir_wh {
+ struct hlist_node wh_hash;
+#ifdef CONFIG_AUFS_SHWH
+ ino_t wh_ino;
+ aufs_bindex_t wh_bindex;
+ unsigned char wh_type;
+#else
+ aufs_bindex_t wh_bindex;
+#endif
+ /* caution: packed */
+ struct au_vdir_destr wh_str;
+} __packed;
+
+union au_vdir_deblk_p {
+ unsigned char *deblk;
+ struct au_vdir_de *de;
+};
+
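+/*
+ * the merged ("virtual") directory contents, cached per directory:
+ * vd_deblk is an array of vd_nblk blocks (each vd_deblk_sz bytes) holding
+ * packed au_vdir_de entries, vd_last is the current read cursor, and
+ * vd_version/vd_jiffy record when the cache was built so that its validity
+ * can be checked later.
+ */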
+struct au_vdir {
+ unsigned char **vd_deblk;
+ unsigned long vd_nblk;
+ struct {
+ unsigned long ul;
+ union au_vdir_deblk_p p;
+ } vd_last;
+
+ u64 vd_version;
+ unsigned int vd_deblk_sz;
+ unsigned long vd_jiffy;
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dir.c */
+extern const struct file_operations aufs_dir_fop;
+void au_add_nlink(struct inode *dir, struct inode *h_dir);
+void au_sub_nlink(struct inode *dir, struct inode *h_dir);
+loff_t au_dir_size(struct file *file, struct dentry *dentry);
+void au_dir_ts(struct inode *dir, aufs_bindex_t bsrc);
+int au_test_empty_lower(struct dentry *dentry);
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist);
+
+/* vdir.c */
+unsigned int au_rdhash_est(loff_t sz);
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp);
+void au_nhash_wh_free(struct au_nhash *whlist);
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+ int limit);
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen);
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+ unsigned int d_type, aufs_bindex_t bindex,
+ unsigned char shwh);
+void au_vdir_free(struct au_vdir *vdir);
+int au_vdir_init(struct file *file);
+int au_vdir_fill_de(struct file *file, struct dir_context *ctx);
+
+/* ioctl.c */
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_AUFS_RDU
+/* rdu.c */
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#endif
+#else
+AuStub(long, au_rdu_ioctl, return -EINVAL, struct file *file,
+ unsigned int cmd, unsigned long arg)
+#ifdef CONFIG_COMPAT
+AuStub(long, au_rdu_compat_ioctl, return -EINVAL, struct file *file,
+ unsigned int cmd, unsigned long arg)
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DIR_H__ */
diff --git a/fs/aufs/dirren.c b/fs/aufs/dirren.c
new file mode 100644
index 000000000000..0bd412ab68c9
--- /dev/null
+++ b/fs/aufs/dirren.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * special handling in renaming a directory
+ * in order to support looking-up the before-renamed name on the lower readonly
+ * branches
+ */
+
+#include <linux/byteorder/generic.h>
+#include "aufs.h"
+
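+/*
+ * each branch keeps a hash table (br_dirren.dr_h_ino) of the lower inode
+ * numbers of directories renamed under the DIRREN option; the helpers below
+ * add, look up, delete, and load/store those entries.
+ */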
+static void au_dr_hino_del(struct au_dr_br *dr, struct au_dr_hino *ent)
+{
+ int idx;
+
+ idx = au_dr_ihash(ent->dr_h_ino);
+ au_hbl_del(&ent->dr_hnode, dr->dr_h_ino + idx);
+}
+
+static int au_dr_hino_test_empty(struct au_dr_br *dr)
+{
+ int ret, i;
+ struct hlist_bl_head *hbl;
+
+ ret = 1;
+ for (i = 0; ret && i < AuDirren_NHASH; i++) {
+ hbl = dr->dr_h_ino + i;
+ hlist_bl_lock(hbl);
+ ret &= hlist_bl_empty(hbl);
+ hlist_bl_unlock(hbl);
+ }
+
+ return ret;
+}
+
+static struct au_dr_hino *au_dr_hino_find(struct au_dr_br *dr, ino_t ino)
+{
+ struct au_dr_hino *found, *ent;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ int idx;
+
+ found = NULL;
+ idx = au_dr_ihash(ino);
+ hbl = dr->dr_h_ino + idx;
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(ent, pos, hbl, dr_hnode)
+ if (ent->dr_h_ino == ino) {
+ found = ent;
+ break;
+ }
+ hlist_bl_unlock(hbl);
+
+ return found;
+}
+
+int au_dr_hino_test_add(struct au_dr_br *dr, ino_t ino,
+ struct au_dr_hino *add_ent)
+{
+ int found, idx;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ struct au_dr_hino *ent;
+
+ found = 0;
+ idx = au_dr_ihash(ino);
+ hbl = dr->dr_h_ino + idx;
+#if 0
+ {
+ struct hlist_bl_node *tmp;
+
+ hlist_bl_for_each_entry_safe(ent, pos, tmp, hbl, dr_hnode)
+ AuDbg("hi%llu\n", (unsigned long long)ent->dr_h_ino);
+ }
+#endif
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(ent, pos, hbl, dr_hnode)
+ if (ent->dr_h_ino == ino) {
+ found = 1;
+ break;
+ }
+ if (!found && add_ent)
+ hlist_bl_add_head(&add_ent->dr_hnode, hbl);
+ hlist_bl_unlock(hbl);
+
+ if (!found && add_ent)
+ AuDbg("i%llu added\n", (unsigned long long)add_ent->dr_h_ino);
+
+ return found;
+}
+
+void au_dr_hino_free(struct au_dr_br *dr)
+{
+ int i;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos, *tmp;
+ struct au_dr_hino *ent;
+
+ /* SiMustWriteLock(sb); */
+
+ for (i = 0; i < AuDirren_NHASH; i++) {
+ hbl = dr->dr_h_ino + i;
+ /* no spinlock since sbinfo must be write-locked */
+ hlist_bl_for_each_entry_safe(ent, pos, tmp, hbl, dr_hnode)
+ au_kfree_rcu(ent);
+ INIT_HLIST_BL_HEAD(hbl);
+ }
+}
+
+/* writes out the stored inode numbers; returns 0 or a negative error */
+static int au_dr_hino_store(struct super_block *sb, struct au_branch *br,
+ struct file *hinofile)
+{
+ int err, i;
+ ssize_t ssz;
+ loff_t pos, oldsize;
+ __be64 u64;
+ struct inode *hinoinode;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *n1, *n2;
+ struct au_dr_hino *ent;
+
+ SiMustWriteLock(sb);
+ AuDebugOn(!au_br_writable(br->br_perm));
+
+ hinoinode = file_inode(hinofile);
+ oldsize = i_size_read(hinoinode);
+
+ err = 0;
+ pos = 0;
+ hbl = br->br_dirren.dr_h_ino;
+ for (i = 0; !err && i < AuDirren_NHASH; i++, hbl++) {
+ /* no bit-lock since sbinfo must be write-locked */
+ hlist_bl_for_each_entry_safe(ent, n1, n2, hbl, dr_hnode) {
+ AuDbg("hi%llu, %pD2\n",
+ (unsigned long long)ent->dr_h_ino, hinofile);
+ u64 = cpu_to_be64(ent->dr_h_ino);
+ ssz = vfsub_write_k(hinofile, &u64, sizeof(u64), &pos);
+ if (ssz == sizeof(u64))
+ continue;
+
+ /* write error */
+ pr_err("ssz %zd, %pD2\n", ssz, hinofile);
+ err = -ENOSPC;
+ if (ssz < 0)
+ err = ssz;
+ break;
+ }
+ }
+ /* regardless of the error */
+ if (pos < oldsize) {
+ err = vfsub_trunc(&hinofile->f_path, pos, /*attr*/0, hinofile);
+ AuTraceErr(err);
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_dr_hino_load(struct au_dr_br *dr, struct file *hinofile)
+{
+ int err, hidx;
+ ssize_t ssz;
+ size_t sz, n;
+ loff_t pos;
+ uint64_t u64;
+ struct au_dr_hino *ent;
+ struct inode *hinoinode;
+ struct hlist_bl_head *hbl;
+
+ err = 0;
+ pos = 0;
+ hbl = dr->dr_h_ino;
+ hinoinode = file_inode(hinofile);
+ sz = i_size_read(hinoinode);
+ AuDebugOn(sz % sizeof(u64));
+ n = sz / sizeof(u64);
+ while (n--) {
+ ssz = vfsub_read_k(hinofile, &u64, sizeof(u64), &pos);
+ if (unlikely(ssz != sizeof(u64))) {
+ pr_err("ssz %zd, %pD2\n", ssz, hinofile);
+ err = -EINVAL;
+ if (ssz < 0)
+ err = ssz;
+ goto out_free;
+ }
+
+ ent = kmalloc(sizeof(*ent), GFP_NOFS);
+ if (!ent) {
+ err = -ENOMEM;
+ AuTraceErr(err);
+ goto out_free;
+ }
+ ent->dr_h_ino = be64_to_cpu((__force __be64)u64);
+ AuDbg("hi%llu, %pD2\n",
+ (unsigned long long)ent->dr_h_ino, hinofile);
+ hidx = au_dr_ihash(ent->dr_h_ino);
+ au_hbl_add(&ent->dr_hnode, hbl + hidx);
+ }
+ goto out; /* success */
+
+out_free:
+ au_dr_hino_free(dr);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * @bindex/@br selects whether hnotify is suspended: pass a valid @bindex
+ * (and @br == NULL) to suspend it, or a non-NULL @br to leave it running.
+ * @path selects the operation: non-NULL means load, NULL means store.
+ */
+static int au_dr_hino(struct super_block *sb, aufs_bindex_t bindex,
+ struct au_branch *br, const struct path *path)
+{
+ int err, flags;
+ unsigned char load, suspend;
+ struct file *hinofile;
+ struct au_hinode *hdir;
+ struct inode *dir, *delegated;
+ struct path hinopath;
+ struct qstr hinoname = QSTR_INIT(AUFS_WH_DR_BRHINO,
+ sizeof(AUFS_WH_DR_BRHINO) - 1);
+
+ AuDebugOn(bindex < 0 && !br);
+ AuDebugOn(bindex >= 0 && br);
+
+ err = -EINVAL;
+ suspend = !br;
+ if (suspend)
+ br = au_sbr(sb, bindex);
+ load = !!path;
+ if (!load) {
+ path = &br->br_path;
+ AuDebugOn(!au_br_writable(br->br_perm));
+ if (unlikely(!au_br_writable(br->br_perm)))
+ goto out;
+ }
+
+ hdir = NULL;
+ if (suspend) {
+ dir = d_inode(sb->s_root);
+ hdir = au_hinode(au_ii(dir), bindex);
+ dir = hdir->hi_inode;
+ au_hn_inode_lock_nested(hdir, AuLsc_I_CHILD);
+ } else {
+ dir = d_inode(path->dentry);
+ inode_lock_nested(dir, AuLsc_I_CHILD);
+ }
+ hinopath.dentry = vfsub_lkup_one(&hinoname, path->dentry);
+ err = PTR_ERR(hinopath.dentry);
+ if (IS_ERR(hinopath.dentry))
+ goto out_unlock;
+
+ err = 0;
+ flags = O_RDONLY;
+ if (load) {
+ if (d_is_negative(hinopath.dentry))
+ goto out_dput; /* success */
+ } else {
+ if (au_dr_hino_test_empty(&br->br_dirren)) {
+ if (d_is_positive(hinopath.dentry)) {
+ delegated = NULL;
+ err = vfsub_unlink(dir, &hinopath, &delegated,
+ /*force*/0);
+ AuTraceErr(err);
+ if (unlikely(err))
+ pr_err("ignored err %d, %pd2\n",
+ err, hinopath.dentry);
+ if (unlikely(err == -EWOULDBLOCK))
+ iput(delegated);
+ err = 0;
+ }
+ goto out_dput;
+ } else if (!d_is_positive(hinopath.dentry)) {
+ err = vfsub_create(dir, &hinopath, 0600,
+ /*want_excl*/false);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out_dput;
+ }
+ flags = O_WRONLY;
+ }
+ hinopath.mnt = path->mnt;
+ hinofile = vfsub_dentry_open(&hinopath, flags);
+ if (suspend)
+ au_hn_inode_unlock(hdir);
+ else
+ inode_unlock(dir);
+ dput(hinopath.dentry);
+ AuTraceErrPtr(hinofile);
+ if (IS_ERR(hinofile)) {
+ err = PTR_ERR(hinofile);
+ goto out;
+ }
+
+ if (load)
+ err = au_dr_hino_load(&br->br_dirren, hinofile);
+ else
+ err = au_dr_hino_store(sb, br, hinofile);
+ fput(hinofile);
+ goto out;
+
+out_dput:
+ dput(hinopath.dentry);
+out_unlock:
+ if (suspend)
+ au_hn_inode_unlock(hdir);
+ else
+ inode_unlock(dir);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_dr_brid_init(struct au_dr_brid *brid, const struct path *path)
+{
+ int err;
+ struct kstatfs kstfs;
+ dev_t dev;
+ struct dentry *dentry;
+ struct super_block *sb;
+
+ err = vfs_statfs((void *)path, &kstfs);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out;
+
+ /* todo: support for UUID */
+
+ if (kstfs.f_fsid.val[0] || kstfs.f_fsid.val[1]) {
+ brid->type = AuBrid_FSID;
+ brid->fsid = kstfs.f_fsid;
+ } else {
+ dentry = path->dentry;
+ sb = dentry->d_sb;
+ dev = sb->s_dev;
+ if (dev) {
+ brid->type = AuBrid_DEV;
+ brid->dev = dev;
+ }
+ }
+
+out:
+ return err;
+}
+
+int au_dr_br_init(struct super_block *sb, struct au_branch *br,
+ const struct path *path)
+{
+ int err, i;
+ struct au_dr_br *dr;
+ struct hlist_bl_head *hbl;
+
+ dr = &br->br_dirren;
+ hbl = dr->dr_h_ino;
+ for (i = 0; i < AuDirren_NHASH; i++, hbl++)
+ INIT_HLIST_BL_HEAD(hbl);
+
+ err = au_dr_brid_init(&dr->dr_brid, path);
+ if (unlikely(err))
+ goto out;
+
+ if (au_opt_test(au_mntflags(sb), DIRREN))
+ err = au_dr_hino(sb, /*bindex*/-1, br, path);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_dr_br_fin(struct super_block *sb, struct au_branch *br)
+{
+ int err;
+
+ err = 0;
+ if (au_br_writable(br->br_perm))
+ err = au_dr_hino(sb, /*bindex*/-1, br, /*path*/NULL);
+ if (!err)
+ au_dr_hino_free(&br->br_dirren);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
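+/*
+ * print the branch id into @buf as "<type>_<id>_<ino>", where <id> is the
+ * uuid, fsid or device number depending on the id type, and <ino> is the
+ * lower inode number of @h_inode; returns the length printed.
+ */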
+static int au_brid_str(struct au_dr_brid *brid, struct inode *h_inode,
+ char *buf, size_t sz)
+{
+ int err;
+ unsigned int major, minor;
+ char *p;
+
+ p = buf;
+ err = snprintf(p, sz, "%d_", brid->type);
+ AuDebugOn(err > sz);
+ p += err;
+ sz -= err;
+ switch (brid->type) {
+ case AuBrid_Unset:
+ return -EINVAL;
+ case AuBrid_UUID:
+ err = snprintf(p, sz, "%pU", brid->uuid.b);
+ break;
+ case AuBrid_FSID:
+ err = snprintf(p, sz, "%08x-%08x",
+ brid->fsid.val[0], brid->fsid.val[1]);
+ break;
+ case AuBrid_DEV:
+ major = MAJOR(brid->dev);
+ minor = MINOR(brid->dev);
+ if (major <= 0xff && minor <= 0xff)
+ err = snprintf(p, sz, "%02x%02x", major, minor);
+ else
+ err = snprintf(p, sz, "%03x:%05x", major, minor);
+ break;
+ }
+ AuDebugOn(err > sz);
+ p += err;
+ sz -= err;
+ err = snprintf(p, sz, "_%llu", (unsigned long long)h_inode->i_ino);
+ AuDebugOn(err > sz);
+ p += err;
+ sz -= err;
+
+ return p - buf;
+}
+
+static int au_drinfo_name(struct au_branch *br, char *name, int len)
+{
+ int rlen;
+ struct dentry *br_dentry;
+ struct inode *br_inode;
+
+ br_dentry = au_br_dentry(br);
+ br_inode = d_inode(br_dentry);
+ rlen = au_brid_str(&br->br_dirren.dr_brid, br_inode, name, len);
+ AuDebugOn(rlen >= AUFS_DIRREN_ENV_VAL_SZ);
+ AuDebugOn(rlen > len);
+
+ return rlen;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * construct drinfo at @*fdata from the given @h_dentry.
+ * when @*fdata is too small, reallocate it and update both @fdata and
+ * @allocated.
+ */
+static int au_drinfo_construct(struct au_drinfo_fdata **fdata,
+ struct dentry *h_dentry,
+ unsigned char *allocated)
+{
+ int err, v;
+ struct au_drinfo_fdata *f, *p;
+ struct au_drinfo *drinfo;
+ struct inode *h_inode;
+ struct qstr *qname;
+
+ err = 0;
+ f = *fdata;
+ h_inode = d_inode(h_dentry);
+ qname = &h_dentry->d_name;
+ drinfo = &f->drinfo;
+ drinfo->ino = (__force uint64_t)cpu_to_be64(h_inode->i_ino);
+ drinfo->oldnamelen = qname->len;
+ if (*allocated < sizeof(*f) + qname->len) {
+ v = roundup_pow_of_two(*allocated + qname->len);
+ p = au_krealloc(f, v, GFP_NOFS, /*may_shrink*/0);
+ if (unlikely(!p)) {
+ err = -ENOMEM;
+ AuTraceErr(err);
+ goto out;
+ }
+ f = p;
+ *fdata = f;
+ *allocated = v;
+ drinfo = &f->drinfo;
+ }
+ memcpy(drinfo->oldname, qname->name, qname->len);
+ AuDbg("i%llu, %.*s\n",
+ be64_to_cpu((__force __be64)drinfo->ino), drinfo->oldnamelen,
+ drinfo->oldname);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* callers have to free the return value */
+static struct au_drinfo *au_drinfo_read_k(struct file *file, ino_t h_ino)
+{
+ struct au_drinfo *ret, *drinfo;
+ struct au_drinfo_fdata fdata;
+ int len;
+ loff_t pos;
+ ssize_t ssz;
+
+ ret = ERR_PTR(-EIO);
+ pos = 0;
+ ssz = vfsub_read_k(file, &fdata, sizeof(fdata), &pos);
+ if (unlikely(ssz != sizeof(fdata))) {
+ AuIOErr("ssz %zd, %u, %pD2\n",
+ ssz, (unsigned int)sizeof(fdata), file);
+ goto out;
+ }
+
+ fdata.magic = ntohl((__force __be32)fdata.magic);
+ switch (fdata.magic) {
+ case AUFS_DRINFO_MAGIC_V1:
+ break;
+ default:
+ AuIOErr("magic-num 0x%x, 0x%x, %pD2\n",
+ fdata.magic, AUFS_DRINFO_MAGIC_V1, file);
+ goto out;
+ }
+
+ drinfo = &fdata.drinfo;
+ len = drinfo->oldnamelen;
+ if (!len) {
+ AuIOErr("broken drinfo %pD2\n", file);
+ goto out;
+ }
+
+ ret = NULL;
+ drinfo->ino = be64_to_cpu((__force __be64)drinfo->ino);
+ if (unlikely(h_ino && drinfo->ino != h_ino)) {
+ AuDbg("ignored i%llu, i%llu, %pD2\n",
+ (unsigned long long)drinfo->ino,
+ (unsigned long long)h_ino, file);
+ goto out; /* success */
+ }
+
+ ret = kmalloc(sizeof(*ret) + len, GFP_NOFS);
+ if (unlikely(!ret)) {
+ ret = ERR_PTR(-ENOMEM);
+ AuTraceErrPtr(ret);
+ goto out;
+ }
+
+ *ret = *drinfo;
+ ssz = vfsub_read_k(file, (void *)ret->oldname, len, &pos);
+ if (unlikely(ssz != len)) {
+ au_kfree_rcu(ret);
+ ret = ERR_PTR(-EIO);
+ AuIOErr("ssz %zd, %u, %pD2\n", ssz, len, file);
+ goto out;
+ }
+
+ AuDbg("oldname %.*s\n", ret->oldnamelen, ret->oldname);
+
+out:
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* in order to be revertible */
+struct au_drinfo_rev_elm {
+ int created;
+ struct dentry *info_dentry;
+ struct au_drinfo *info_last;
+};
+
+struct au_drinfo_rev {
+ unsigned char already;
+ aufs_bindex_t nelm;
+ struct au_drinfo_rev_elm elm[0];
+};
+
+/* todo: isn't it too large? */
+struct au_drinfo_store {
+ struct path h_ppath;
+ struct dentry *h_dentry;
+ struct au_drinfo_fdata *fdata;
+ char *infoname; /* inside of whname, just after PFX */
+ char whname[sizeof(AUFS_WH_DR_INFO_PFX) + AUFS_DIRREN_ENV_VAL_SZ];
+ aufs_bindex_t btgt, btail;
+ unsigned char no_sio,
+ allocated, /* current size of *fdata */
+ infonamelen, /* room in whname for infoname */
+ whnamelen, /* length of the generated name */
+ renameback; /* renamed back */
+};
+
+/* on rename(2) error, the caller should revert it using @elm */
+static int au_drinfo_do_store(struct au_drinfo_store *w,
+ struct au_drinfo_rev_elm *elm)
+{
+ int err, len;
+ ssize_t ssz;
+ loff_t pos;
+ struct path infopath = {
+ .mnt = w->h_ppath.mnt
+ };
+ struct inode *h_dir, *h_inode, *delegated;
+ struct file *infofile;
+ struct qstr *qname;
+
+ AuDebugOn(elm
+ && memcmp(elm, page_address(ZERO_PAGE(0)), sizeof(*elm)));
+
+ infopath.dentry = vfsub_lookup_one_len(w->whname, w->h_ppath.dentry,
+ w->whnamelen);
+ AuTraceErrPtr(infopath.dentry);
+ if (IS_ERR(infopath.dentry)) {
+ err = PTR_ERR(infopath.dentry);
+ goto out;
+ }
+
+ err = 0;
+ h_dir = d_inode(w->h_ppath.dentry);
+ if (elm && d_is_negative(infopath.dentry)) {
+ err = vfsub_create(h_dir, &infopath, 0600, /*want_excl*/true);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out_dput;
+ elm->created = 1;
+ elm->info_dentry = dget(infopath.dentry);
+ }
+
+ infofile = vfsub_dentry_open(&infopath, O_RDWR);
+ AuTraceErrPtr(infofile);
+ if (IS_ERR(infofile)) {
+ err = PTR_ERR(infofile);
+ goto out_dput;
+ }
+
+ h_inode = d_inode(infopath.dentry);
+ if (elm && i_size_read(h_inode)) {
+ h_inode = d_inode(w->h_dentry);
+ elm->info_last = au_drinfo_read_k(infofile, h_inode->i_ino);
+ AuTraceErrPtr(elm->info_last);
+ if (IS_ERR(elm->info_last)) {
+ err = PTR_ERR(elm->info_last);
+ elm->info_last = NULL;
+ AuDebugOn(elm->info_dentry);
+ goto out_fput;
+ }
+ }
+
+ if (elm && w->renameback) {
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, &infopath, &delegated, /*force*/0);
+ AuTraceErr(err);
+ if (unlikely(err == -EWOULDBLOCK))
+ iput(delegated);
+ goto out_fput;
+ }
+
+ pos = 0;
+ qname = &w->h_dentry->d_name;
+ len = sizeof(*w->fdata) + qname->len;
+ if (!elm)
+ len = sizeof(*w->fdata) + w->fdata->drinfo.oldnamelen;
+ ssz = vfsub_write_k(infofile, w->fdata, len, &pos);
+ if (ssz == len) {
+ AuDbg("hi%llu, %.*s\n", w->fdata->drinfo.ino,
+ w->fdata->drinfo.oldnamelen, w->fdata->drinfo.oldname);
+ goto out_fput; /* success */
+ } else {
+ err = -EIO;
+ if (ssz < 0)
+ err = ssz;
+ /* the caller should revert it using @elm */
+ }
+
+out_fput:
+ fput(infofile);
+out_dput:
+ dput(infopath.dentry);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+struct au_call_drinfo_do_store_args {
+ int *errp;
+ struct au_drinfo_store *w;
+ struct au_drinfo_rev_elm *elm;
+};
+
+static void au_call_drinfo_do_store(void *args)
+{
+ struct au_call_drinfo_do_store_args *a = args;
+
+ *a->errp = au_drinfo_do_store(a->w, a->elm);
+}
+
+static int au_drinfo_store_sio(struct au_drinfo_store *w,
+ struct au_drinfo_rev_elm *elm)
+{
+ int err, wkq_err;
+
+ if (w->no_sio)
+ err = au_drinfo_do_store(w, elm);
+ else {
+ struct au_call_drinfo_do_store_args a = {
+ .errp = &err,
+ .w = w,
+ .elm = elm
+ };
+ wkq_err = au_wkq_wait(au_call_drinfo_do_store, &a);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+ AuTraceErr(err);
+
+ return err;
+}
+
+static int au_drinfo_store_work_init(struct au_drinfo_store *w,
+ aufs_bindex_t btgt)
+{
+ int err;
+
+ memset(w, 0, sizeof(*w));
+ w->allocated = roundup_pow_of_two(sizeof(*w->fdata) + 40);
+ strcpy(w->whname, AUFS_WH_DR_INFO_PFX);
+ w->infoname = w->whname + sizeof(AUFS_WH_DR_INFO_PFX) - 1;
+ w->infonamelen = sizeof(w->whname) - sizeof(AUFS_WH_DR_INFO_PFX);
+ w->btgt = btgt;
+ w->no_sio = !!uid_eq(current_fsuid(), GLOBAL_ROOT_UID);
+
+ err = -ENOMEM;
+ w->fdata = kcalloc(1, w->allocated, GFP_NOFS);
+ if (unlikely(!w->fdata)) {
+ AuTraceErr(err);
+ goto out;
+ }
+ w->fdata->magic = (__force uint32_t)htonl(AUFS_DRINFO_MAGIC_V1);
+ err = 0;
+
+out:
+ return err;
+}
+
+static void au_drinfo_store_work_fin(struct au_drinfo_store *w)
+{
+ au_kfree_rcu(w->fdata);
+}
+
+static void au_drinfo_store_rev(struct au_drinfo_rev *rev,
+ struct au_drinfo_store *w)
+{
+ struct au_drinfo_rev_elm *elm;
+ struct inode *h_dir, *delegated;
+ int err, nelm;
+ struct path infopath = {
+ .mnt = w->h_ppath.mnt
+ };
+
+ h_dir = d_inode(w->h_ppath.dentry);
+ IMustLock(h_dir);
+
+ err = 0;
+ elm = rev->elm;
+ for (nelm = rev->nelm; nelm > 0; nelm--, elm++) {
+ AuDebugOn(elm->created && elm->info_last);
+ if (elm->created) {
+ AuDbg("here\n");
+ delegated = NULL;
+ infopath.dentry = elm->info_dentry;
+ err = vfsub_unlink(h_dir, &infopath, &delegated,
+ !w->no_sio);
+ AuTraceErr(err);
+ if (unlikely(err == -EWOULDBLOCK))
+ iput(delegated);
+ dput(elm->info_dentry);
+ } else if (elm->info_last) {
+ AuDbg("here\n");
+ w->fdata->drinfo = *elm->info_last;
+ memcpy(w->fdata->drinfo.oldname,
+ elm->info_last->oldname,
+ elm->info_last->oldnamelen);
+ err = au_drinfo_store_sio(w, /*elm*/NULL);
+ au_kfree_rcu(elm->info_last);
+ }
+ if (unlikely(err))
+ AuIOErr("%d, %s\n", err, w->whname);
+ /* go on even if err */
+ }
+}
+
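+/*
+ * for every lower branch below @btgt which still holds this dir, write a
+ * "drinfo" record (the lower inode number and the old name) into a file
+ * named with AUFS_WH_DR_INFO_PFX under the dir on the @btgt branch;
+ * on failure, everything written so far is reverted.
+ */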
+/* caller has to call au_dr_rename_fin() later */
+static int au_drinfo_store(struct dentry *dentry, aufs_bindex_t btgt,
+ struct qstr *dst_name, void *_rev)
+{
+ int err, sz, nelm;
+ aufs_bindex_t bindex, btail;
+ struct au_drinfo_store work;
+ struct au_drinfo_rev *rev, **p;
+ struct au_drinfo_rev_elm *elm;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_hinode *hdir;
+
+ err = au_drinfo_store_work_init(&work, btgt);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out;
+
+ err = -ENOMEM;
+ btail = au_dbtaildir(dentry);
+ nelm = btail - btgt;
+ sz = sizeof(*rev) + sizeof(*elm) * nelm;
+ rev = kcalloc(1, sz, GFP_NOFS);
+ if (unlikely(!rev)) {
+ AuTraceErr(err);
+ goto out_args;
+ }
+ rev->nelm = nelm;
+ elm = rev->elm;
+ p = _rev;
+ *p = rev;
+
+ err = 0;
+ sb = dentry->d_sb;
+ work.h_ppath.dentry = au_h_dptr(dentry, btgt);
+ work.h_ppath.mnt = au_sbr_mnt(sb, btgt);
+ hdir = au_hi(d_inode(dentry), btgt);
+ au_hn_inode_lock_nested(hdir, AuLsc_I_CHILD);
+ for (bindex = btgt + 1; bindex <= btail; bindex++, elm++) {
+ work.h_dentry = au_h_dptr(dentry, bindex);
+ if (!work.h_dentry)
+ continue;
+
+ err = au_drinfo_construct(&work.fdata, work.h_dentry,
+ &work.allocated);
+ AuTraceErr(err);
+ if (unlikely(err))
+ break;
+
+ work.renameback = au_qstreq(&work.h_dentry->d_name, dst_name);
+ br = au_sbr(sb, bindex);
+ work.whnamelen = sizeof(AUFS_WH_DR_INFO_PFX) - 1;
+ work.whnamelen += au_drinfo_name(br, work.infoname,
+ work.infonamelen);
+ AuDbg("whname %.*s, i%llu, %.*s\n",
+ work.whnamelen, work.whname,
+ be64_to_cpu((__force __be64)work.fdata->drinfo.ino),
+ work.fdata->drinfo.oldnamelen,
+ work.fdata->drinfo.oldname);
+
+ err = au_drinfo_store_sio(&work, elm);
+ AuTraceErr(err);
+ if (unlikely(err))
+ break;
+ }
+ if (unlikely(err)) {
+ /* revert all drinfo */
+ au_drinfo_store_rev(rev, &work);
+ au_kfree_try_rcu(rev);
+ *p = NULL;
+ }
+ au_hn_inode_unlock(hdir);
+
+out_args:
+ au_drinfo_store_work_fin(&work);
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
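+/*
+ * prepare the dirren data before actually renaming @src on branch @bindex:
+ * remember the lower inode number in the branch's dirren hash and store the
+ * drinfo files; the caller later commits with au_dr_rename_fin() or reverts
+ * with au_dr_rename_rev().
+ */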
+int au_dr_rename(struct dentry *src, aufs_bindex_t bindex,
+ struct qstr *dst_name, void *_rev)
+{
+ int err, already;
+ ino_t ino;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_dr_br *dr;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ struct au_dr_hino *ent;
+ struct au_drinfo_rev *rev, **p;
+
+ AuDbg("bindex %d\n", bindex);
+
+ err = -ENOMEM;
+ ent = kmalloc(sizeof(*ent), GFP_NOFS);
+ if (unlikely(!ent))
+ goto out;
+
+ sb = src->d_sb;
+ br = au_sbr(sb, bindex);
+ dr = &br->br_dirren;
+ h_dentry = au_h_dptr(src, bindex);
+ h_inode = d_inode(h_dentry);
+ ino = h_inode->i_ino;
+ ent->dr_h_ino = ino;
+ already = au_dr_hino_test_add(dr, ino, ent);
+ AuDbg("b%d, hi%llu, already %d\n",
+ bindex, (unsigned long long)ino, already);
+
+ err = au_drinfo_store(src, bindex, dst_name, _rev);
+ AuTraceErr(err);
+ if (!err) {
+ p = _rev;
+ rev = *p;
+ rev->already = already;
+ goto out; /* success */
+ }
+
+ /* revert */
+ if (!already)
+ au_dr_hino_del(dr, ent);
+ au_kfree_rcu(ent);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+void au_dr_rename_fin(struct dentry *src, aufs_bindex_t btgt, void *_rev)
+{
+ struct au_drinfo_rev *rev;
+ struct au_drinfo_rev_elm *elm;
+ int nelm;
+
+ rev = _rev;
+ elm = rev->elm;
+ for (nelm = rev->nelm; nelm > 0; nelm--, elm++) {
+ dput(elm->info_dentry);
+ au_kfree_rcu(elm->info_last);
+ }
+ au_kfree_try_rcu(rev);
+}
+
+void au_dr_rename_rev(struct dentry *src, aufs_bindex_t btgt, void *_rev)
+{
+ int err;
+ struct au_drinfo_store work;
+ struct au_drinfo_rev *rev = _rev;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct inode *h_inode;
+ struct au_dr_br *dr;
+ struct au_dr_hino *ent;
+
+ err = au_drinfo_store_work_init(&work, btgt);
+ if (unlikely(err))
+ goto out;
+
+ sb = src->d_sb;
+ br = au_sbr(sb, btgt);
+ work.h_ppath.dentry = au_h_dptr(src, btgt);
+ work.h_ppath.mnt = au_br_mnt(br);
+ au_drinfo_store_rev(rev, &work);
+ au_drinfo_store_work_fin(&work);
+ if (rev->already)
+ goto out;
+
+ dr = &br->br_dirren;
+ h_inode = d_inode(work.h_ppath.dentry);
+ ent = au_dr_hino_find(dr, h_inode->i_ino);
+ BUG_ON(!ent);
+ au_dr_hino_del(dr, ent);
+ au_kfree_rcu(ent);
+
+out:
+ au_kfree_try_rcu(rev);
+ if (unlikely(err))
+ pr_err("failed to remove dirren info\n");
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct au_drinfo *au_drinfo_do_load(struct path *h_ppath,
+ char *whname, int whnamelen,
+ struct dentry **info_dentry)
+{
+ struct au_drinfo *drinfo;
+ struct file *f;
+ struct inode *h_dir;
+ struct path infopath;
+ int unlocked;
+
+ AuDbg("%pd/%.*s\n", h_ppath->dentry, whnamelen, whname);
+
+ *info_dentry = NULL;
+ drinfo = NULL;
+ unlocked = 0;
+ h_dir = d_inode(h_ppath->dentry);
+ inode_lock_shared_nested(h_dir, AuLsc_I_PARENT);
+ infopath.dentry = vfsub_lookup_one_len(whname, h_ppath->dentry,
+ whnamelen);
+ if (IS_ERR(infopath.dentry)) {
+ drinfo = (void *)infopath.dentry;
+ goto out;
+ }
+
+ if (d_is_negative(infopath.dentry))
+ goto out_dput; /* success */
+
+ infopath.mnt = h_ppath->mnt;
+ f = vfsub_dentry_open(&infopath, O_RDONLY);
+ inode_unlock_shared(h_dir);
+ unlocked = 1;
+ if (IS_ERR(f)) {
+ drinfo = (void *)f;
+ goto out_dput;
+ }
+
+ drinfo = au_drinfo_read_k(f, /*h_ino*/0);
+ if (IS_ERR_OR_NULL(drinfo))
+ goto out_fput;
+
+ AuDbg("oldname %.*s\n", drinfo->oldnamelen, drinfo->oldname);
+ *info_dentry = dget(infopath.dentry); /* keep it alive */
+
+out_fput:
+ fput(f);
+out_dput:
+ dput(infopath.dentry);
+out:
+ if (!unlocked)
+ inode_unlock_shared(h_dir);
+ AuTraceErrPtr(drinfo);
+ return drinfo;
+}
+
+struct au_drinfo_do_load_args {
+ struct au_drinfo **drinfop;
+ struct path *h_ppath;
+ char *whname;
+ int whnamelen;
+ struct dentry **info_dentry;
+};
+
+static void au_call_drinfo_do_load(void *args)
+{
+ struct au_drinfo_do_load_args *a = args;
+
+ *a->drinfop = au_drinfo_do_load(a->h_ppath, a->whname, a->whnamelen,
+ a->info_dentry);
+}
+
+struct au_drinfo_load {
+ struct path h_ppath;
+ struct qstr *qname;
+ unsigned char no_sio;
+
+ aufs_bindex_t ninfo;
+ struct au_drinfo **drinfo;
+};
+
+static int au_drinfo_load(struct au_drinfo_load *w, aufs_bindex_t bindex,
+ struct au_branch *br)
+{
+ int err, wkq_err, whnamelen, e;
+ char whname[sizeof(AUFS_WH_DR_INFO_PFX) + AUFS_DIRREN_ENV_VAL_SZ]
+ = AUFS_WH_DR_INFO_PFX;
+ struct au_drinfo *drinfo;
+ struct qstr oldname;
+ struct inode *h_dir, *delegated;
+ struct dentry *info_dentry;
+ struct path infopath;
+
+ whnamelen = sizeof(AUFS_WH_DR_INFO_PFX) - 1;
+ whnamelen += au_drinfo_name(br, whname + whnamelen,
+ sizeof(whname) - whnamelen);
+ if (w->no_sio)
+ drinfo = au_drinfo_do_load(&w->h_ppath, whname, whnamelen,
+ &info_dentry);
+ else {
+ struct au_drinfo_do_load_args args = {
+ .drinfop = &drinfo,
+ .h_ppath = &w->h_ppath,
+ .whname = whname,
+ .whnamelen = whnamelen,
+ .info_dentry = &info_dentry
+ };
+ wkq_err = au_wkq_wait(au_call_drinfo_do_load, &args);
+ if (unlikely(wkq_err))
+ drinfo = ERR_PTR(wkq_err);
+ }
+ err = PTR_ERR(drinfo);
+ if (IS_ERR_OR_NULL(drinfo))
+ goto out;
+
+ err = 0;
+ oldname.len = drinfo->oldnamelen;
+ oldname.name = drinfo->oldname;
+ if (au_qstreq(w->qname, &oldname)) {
+ /* the name is renamed back */
+ au_kfree_rcu(drinfo);
+ drinfo = NULL;
+
+ infopath.dentry = info_dentry;
+ infopath.mnt = w->h_ppath.mnt;
+ h_dir = d_inode(w->h_ppath.dentry);
+ delegated = NULL;
+ inode_lock_nested(h_dir, AuLsc_I_PARENT);
+ e = vfsub_unlink(h_dir, &infopath, &delegated, !w->no_sio);
+ inode_unlock(h_dir);
+ if (unlikely(e))
+			AuIOErr("ignored %d, %pd2\n", e, infopath.dentry);
+ if (unlikely(e == -EWOULDBLOCK))
+ iput(delegated);
+ }
+ au_kfree_rcu(w->drinfo[bindex]);
+ w->drinfo[bindex] = drinfo;
+ dput(info_dentry);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_dr_lkup_free(struct au_drinfo **drinfo, int n)
+{
+ struct au_drinfo **p = drinfo;
+
+ while (n-- > 0)
+ au_kfree_rcu(*drinfo++);
+ au_kfree_try_rcu(p);
+}
+
+int au_dr_lkup(struct au_do_lookup_args *lkup, struct dentry *dentry,
+ aufs_bindex_t btgt)
+{
+ int err, ninfo;
+ struct au_drinfo_load w;
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+ struct inode *h_dir;
+ struct au_dr_hino *ent;
+ struct super_block *sb;
+
+ AuDbg("%.*s, name %.*s, whname %.*s, b%d\n",
+ AuLNPair(&dentry->d_name), AuLNPair(&lkup->dirren.dr_name),
+ AuLNPair(&lkup->whname), btgt);
+
+ sb = dentry->d_sb;
+ bbot = au_sbbot(sb);
+ w.ninfo = bbot + 1;
+ if (!lkup->dirren.drinfo) {
+ lkup->dirren.drinfo = kcalloc(w.ninfo,
+ sizeof(*lkup->dirren.drinfo),
+ GFP_NOFS);
+ if (unlikely(!lkup->dirren.drinfo)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ lkup->dirren.ninfo = w.ninfo;
+ }
+ w.drinfo = lkup->dirren.drinfo;
+ w.no_sio = !!uid_eq(current_fsuid(), GLOBAL_ROOT_UID);
+ w.h_ppath.dentry = au_h_dptr(dentry, btgt);
+ AuDebugOn(!w.h_ppath.dentry);
+ w.h_ppath.mnt = au_sbr_mnt(sb, btgt);
+ w.qname = &dentry->d_name;
+
+ ninfo = 0;
+ for (bindex = btgt + 1; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ err = au_drinfo_load(&w, bindex, br);
+ if (unlikely(err))
+ goto out_free;
+ if (w.drinfo[bindex])
+ ninfo++;
+ }
+ if (!ninfo) {
+ br = au_sbr(sb, btgt);
+ h_dir = d_inode(w.h_ppath.dentry);
+ ent = au_dr_hino_find(&br->br_dirren, h_dir->i_ino);
+ AuDebugOn(!ent);
+ au_dr_hino_del(&br->br_dirren, ent);
+ au_kfree_rcu(ent);
+ }
+ goto out; /* success */
+
+out_free:
+ au_dr_lkup_free(lkup->dirren.drinfo, lkup->dirren.ninfo);
+ lkup->dirren.ninfo = 0;
+ lkup->dirren.drinfo = NULL;
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+void au_dr_lkup_fin(struct au_do_lookup_args *lkup)
+{
+ au_dr_lkup_free(lkup->dirren.drinfo, lkup->dirren.ninfo);
+}
+
+int au_dr_lkup_name(struct au_do_lookup_args *lkup, aufs_bindex_t btgt)
+{
+ int err;
+ struct au_drinfo *drinfo;
+
+ err = 0;
+ if (!lkup->dirren.drinfo)
+ goto out;
+ AuDebugOn(lkup->dirren.ninfo < btgt + 1);
+ drinfo = lkup->dirren.drinfo[btgt + 1];
+ if (!drinfo)
+ goto out;
+
+ au_kfree_try_rcu(lkup->whname.name);
+ lkup->whname.name = NULL;
+ lkup->dirren.dr_name.len = drinfo->oldnamelen;
+ lkup->dirren.dr_name.name = drinfo->oldname;
+ lkup->name = &lkup->dirren.dr_name;
+ err = au_wh_name_alloc(&lkup->whname, lkup->name);
+ if (!err)
+ AuDbg("name %.*s, whname %.*s, b%d\n",
+ AuLNPair(lkup->name), AuLNPair(&lkup->whname),
+ btgt);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_dr_lkup_h_ino(struct au_do_lookup_args *lkup, aufs_bindex_t bindex,
+ ino_t h_ino)
+{
+ int match;
+ struct au_drinfo *drinfo;
+
+ match = 1;
+ if (!lkup->dirren.drinfo)
+ goto out;
+ AuDebugOn(lkup->dirren.ninfo < bindex + 1);
+ drinfo = lkup->dirren.drinfo[bindex + 1];
+ if (!drinfo)
+ goto out;
+
+ match = (drinfo->ino == h_ino);
+ AuDbg("match %d\n", match);
+
+out:
+ return match;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_dr_opt_set(struct super_block *sb)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+
+ err = 0;
+ bbot = au_sbbot(sb);
+ for (bindex = 0; !err && bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ err = au_dr_hino(sb, bindex, /*br*/NULL, &br->br_path);
+ }
+
+ return err;
+}
+
+int au_dr_opt_flush(struct super_block *sb)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+
+ err = 0;
+ bbot = au_sbbot(sb);
+ for (bindex = 0; !err && bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_writable(br->br_perm))
+ err = au_dr_hino(sb, bindex, /*br*/NULL, /*path*/NULL);
+ }
+
+ return err;
+}
+
+int au_dr_opt_clr(struct super_block *sb, int no_flush)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+
+ err = 0;
+ if (!no_flush) {
+ err = au_dr_opt_flush(sb);
+ if (unlikely(err))
+ goto out;
+ }
+
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ au_dr_hino_free(&br->br_dirren);
+ }
+
+out:
+ return err;
+}
diff --git a/fs/aufs/dirren.h b/fs/aufs/dirren.h
new file mode 100644
index 000000000000..a986736462c3
--- /dev/null
+++ b/fs/aufs/dirren.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * renamed dir info
+ */
+
+#ifndef __AUFS_DIRREN_H__
+#define __AUFS_DIRREN_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/statfs.h>
+#include <linux/uuid.h>
+#include "hbl.h"
+
+#define AuDirren_NHASH 100
+
+#ifdef CONFIG_AUFS_DIRREN
+enum au_brid_type {
+ AuBrid_Unset,
+ AuBrid_UUID,
+ AuBrid_FSID,
+ AuBrid_DEV
+};
+
+struct au_dr_brid {
+ enum au_brid_type type;
+ union {
+		uuid_t uuid;	/* not implemented yet */
+ fsid_t fsid;
+ dev_t dev;
+ };
+};
+
+/* 20 is the maximum number of decimal digits in a 64-bit unsigned integer */
+/* brid-type "_" uuid "_" inum */
+#define AUFS_DIRREN_FNAME_SZ (1 + 1 + UUID_STRING_LEN + 20)
+#define AUFS_DIRREN_ENV_VAL_SZ (AUFS_DIRREN_FNAME_SZ + 1 + 20)
+
+struct au_dr_hino {
+ struct hlist_bl_node dr_hnode;
+ ino_t dr_h_ino;
+};
+
+struct au_dr_br {
+ struct hlist_bl_head dr_h_ino[AuDirren_NHASH];
+ struct au_dr_brid dr_brid;
+};
+
+struct au_dr_lookup {
+	/* dr_name is pointed to by struct au_do_lookup_args.name */
+ struct qstr dr_name; /* subset of dr_info */
+ aufs_bindex_t ninfo;
+ struct au_drinfo **drinfo;
+};
+#else
+struct au_dr_hino;
+/* empty */
+struct au_dr_br { };
+struct au_dr_lookup { };
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_branch;
+struct au_do_lookup_args;
+struct au_hinode;
+#ifdef CONFIG_AUFS_DIRREN
+int au_dr_hino_test_add(struct au_dr_br *dr, ino_t h_ino,
+ struct au_dr_hino *add_ent);
+void au_dr_hino_free(struct au_dr_br *dr);
+int au_dr_br_init(struct super_block *sb, struct au_branch *br,
+ const struct path *path);
+int au_dr_br_fin(struct super_block *sb, struct au_branch *br);
+int au_dr_rename(struct dentry *src, aufs_bindex_t bindex,
+ struct qstr *dst_name, void *_rev);
+void au_dr_rename_fin(struct dentry *src, aufs_bindex_t btgt, void *rev);
+void au_dr_rename_rev(struct dentry *src, aufs_bindex_t bindex, void *rev);
+int au_dr_lkup(struct au_do_lookup_args *lkup, struct dentry *dentry,
+ aufs_bindex_t bindex);
+int au_dr_lkup_name(struct au_do_lookup_args *lkup, aufs_bindex_t btgt);
+int au_dr_lkup_h_ino(struct au_do_lookup_args *lkup, aufs_bindex_t bindex,
+ ino_t h_ino);
+void au_dr_lkup_fin(struct au_do_lookup_args *lkup);
+int au_dr_opt_set(struct super_block *sb);
+int au_dr_opt_flush(struct super_block *sb);
+int au_dr_opt_clr(struct super_block *sb, int no_flush);
+#else
+AuStubInt0(au_dr_hino_test_add, struct au_dr_br *dr, ino_t h_ino,
+ struct au_dr_hino *add_ent);
+AuStubVoid(au_dr_hino_free, struct au_dr_br *dr);
+AuStubInt0(au_dr_br_init, struct super_block *sb, struct au_branch *br,
+ const struct path *path);
+AuStubInt0(au_dr_br_fin, struct super_block *sb, struct au_branch *br);
+AuStubInt0(au_dr_rename, struct dentry *src, aufs_bindex_t bindex,
+ struct qstr *dst_name, void *_rev);
+AuStubVoid(au_dr_rename_fin, struct dentry *src, aufs_bindex_t btgt, void *rev);
+AuStubVoid(au_dr_rename_rev, struct dentry *src, aufs_bindex_t bindex,
+ void *rev);
+AuStubInt0(au_dr_lkup, struct au_do_lookup_args *lkup, struct dentry *dentry,
+ aufs_bindex_t bindex);
+AuStubInt0(au_dr_lkup_name, struct au_do_lookup_args *lkup, aufs_bindex_t btgt);
+AuStubInt0(au_dr_lkup_h_ino, struct au_do_lookup_args *lkup,
+ aufs_bindex_t bindex, ino_t h_ino);
+AuStubVoid(au_dr_lkup_fin, struct au_do_lookup_args *lkup);
+AuStubInt0(au_dr_opt_set, struct super_block *sb);
+AuStubInt0(au_dr_opt_flush, struct super_block *sb);
+AuStubInt0(au_dr_opt_clr, struct super_block *sb, int no_flush);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_DIRREN
+static inline int au_dr_ihash(ino_t h_ino)
+{
+ return h_ino % AuDirren_NHASH;
+}
+#else
+AuStubInt0(au_dr_ihash, ino_t h_ino);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DIRREN_H__ */
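The dr_h_ino table above remembers, per branch, the lower inode numbers of renamed directories, bucketed by the simple modulo hash au_dr_ihash(). A minimal userspace sketch of the same bucketing scheme follows; it is only an illustration, using one pthread mutex per bucket instead of hlist_bl bit-spinlocks, and the names NHASH, struct hino and hino_test_add are made up for the example.

	#include <pthread.h>
	#include <sys/types.h>

	#define NHASH 100			/* mirrors AuDirren_NHASH */

	struct hino {				/* one remembered inode number */
		struct hino *next;
		ino_t ino;
	};

	struct hino_table {			/* plays the role of au_dr_br.dr_h_ino[] */
		pthread_mutex_t lock[NHASH];
		struct hino *bucket[NHASH];
	};

	static int ihash(ino_t ino)
	{
		return (int)(ino % NHASH);	/* same idea as au_dr_ihash() */
	}

	/* return 1 if @ino was already present, 0 if @ent was inserted */
	static int hino_test_add(struct hino_table *t, ino_t ino, struct hino *ent)
	{
		int h = ihash(ino), found = 0;
		struct hino *p;

		pthread_mutex_lock(&t->lock[h]);
		for (p = t->bucket[h]; p; p = p->next)
			if (p->ino == ino) {
				found = 1;
				break;
			}
		if (!found) {
			ent->ino = ino;
			ent->next = t->bucket[h];
			t->bucket[h] = ent;
		}
		pthread_mutex_unlock(&t->lock[h]);
		return found;
	}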
diff --git a/fs/aufs/dynop.c b/fs/aufs/dynop.c
new file mode 100644
index 000000000000..06c558367944
--- /dev/null
+++ b/fs/aufs/dynop.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * dynamically customizable operations for regular files
+ */
+
+#include "aufs.h"
+
+#define DyPrSym(key) AuDbgSym(key->dk_op.dy_hop)
+
+/*
+ * How large will these lists be?
+ * Usually just a few elements, 20-30 at most for each, I guess.
+ */
+static struct hlist_bl_head dynop[AuDyLast];
+
+static struct au_dykey *dy_gfind_get(struct hlist_bl_head *hbl,
+ const void *h_op)
+{
+ struct au_dykey *key, *tmp;
+ struct hlist_bl_node *pos;
+
+ key = NULL;
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(tmp, pos, hbl, dk_hnode)
+ if (tmp->dk_op.dy_hop == h_op) {
+ key = tmp;
+ kref_get(&key->dk_kref);
+ break;
+ }
+ hlist_bl_unlock(hbl);
+
+ return key;
+}
+
+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key)
+{
+ struct au_dykey **k, *found;
+ const void *h_op = key->dk_op.dy_hop;
+ int i;
+
+ found = NULL;
+ k = br->br_dykey;
+ for (i = 0; i < AuBrDynOp; i++)
+ if (k[i]) {
+ if (k[i]->dk_op.dy_hop == h_op) {
+ found = k[i];
+ break;
+ }
+ } else
+ break;
+ if (!found) {
+ spin_lock(&br->br_dykey_lock);
+ for (; i < AuBrDynOp; i++)
+ if (k[i]) {
+ if (k[i]->dk_op.dy_hop == h_op) {
+ found = k[i];
+ break;
+ }
+ } else {
+ k[i] = key;
+ break;
+ }
+ spin_unlock(&br->br_dykey_lock);
+ BUG_ON(i == AuBrDynOp); /* expand the array */
+ }
+
+ return found;
+}
+
+/* kref_get() if @key is already added */
+static struct au_dykey *dy_gadd(struct hlist_bl_head *hbl, struct au_dykey *key)
+{
+ struct au_dykey *tmp, *found;
+ struct hlist_bl_node *pos;
+ const void *h_op = key->dk_op.dy_hop;
+
+ found = NULL;
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(tmp, pos, hbl, dk_hnode)
+ if (tmp->dk_op.dy_hop == h_op) {
+ kref_get(&tmp->dk_kref);
+ found = tmp;
+ break;
+ }
+ if (!found)
+ hlist_bl_add_head(&key->dk_hnode, hbl);
+ hlist_bl_unlock(hbl);
+
+ if (!found)
+ DyPrSym(key);
+ return found;
+}
+
+static void dy_free_rcu(struct rcu_head *rcu)
+{
+ struct au_dykey *key;
+
+ key = container_of(rcu, struct au_dykey, dk_rcu);
+ DyPrSym(key);
+ au_kfree_rcu(key);
+}
+
+static void dy_free(struct kref *kref)
+{
+ struct au_dykey *key;
+ struct hlist_bl_head *hbl;
+
+ key = container_of(kref, struct au_dykey, dk_kref);
+ hbl = dynop + key->dk_op.dy_type;
+ au_hbl_del(&key->dk_hnode, hbl);
+ call_rcu(&key->dk_rcu, dy_free_rcu);
+}
+
+void au_dy_put(struct au_dykey *key)
+{
+ kref_put(&key->dk_kref, dy_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define DyDbgSize(cnt, op) AuDebugOn(cnt != sizeof(op)/sizeof(void *))
+
+#ifdef CONFIG_AUFS_DEBUG
+#define DyDbgDeclare(cnt) unsigned int cnt = 0
+#define DyDbgInc(cnt) do { cnt++; } while (0)
+#else
+#define DyDbgDeclare(cnt) do {} while (0)
+#define DyDbgInc(cnt) do {} while (0)
+#endif
+
+#define DySet(func, dst, src, h_op, h_sb) do { \
+ DyDbgInc(cnt); \
+ if (h_op->func) { \
+ if (src.func) \
+ dst.func = src.func; \
+ else \
+ AuDbg("%s %s\n", au_sbtype(h_sb), #func); \
+ } \
+} while (0)
+
+#define DySetForce(func, dst, src) do { \
+ AuDebugOn(!src.func); \
+ DyDbgInc(cnt); \
+ dst.func = src.func; \
+} while (0)
+
+#define DySetAop(func) \
+ DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb)
+#define DySetAopForce(func) \
+ DySetForce(func, dyaop->da_op, aufs_aop)
+
+static void dy_aop(struct au_dykey *key, const void *h_op,
+ struct super_block *h_sb __maybe_unused)
+{
+ struct au_dyaop *dyaop = (void *)key;
+ const struct address_space_operations *h_aop = h_op;
+ DyDbgDeclare(cnt);
+
+ AuDbg("%s\n", au_sbtype(h_sb));
+
+ DySetAop(writepage);
+ DySetAopForce(readpage); /* force */
+ DySetAop(writepages);
+ DySetAop(set_page_dirty);
+ DySetAop(readpages);
+ DySetAop(write_begin);
+ DySetAop(write_end);
+ DySetAop(bmap);
+ DySetAop(invalidatepage);
+ DySetAop(releasepage);
+ DySetAop(freepage);
+ /* this one will be changed according to an aufs mount option */
+ DySetAop(direct_IO);
+ DySetAop(migratepage);
+ DySetAop(isolate_page);
+ DySetAop(putback_page);
+ DySetAop(launder_page);
+ DySetAop(is_partially_uptodate);
+ DySetAop(is_dirty_writeback);
+ DySetAop(error_remove_page);
+ DySetAop(swap_activate);
+ DySetAop(swap_deactivate);
+
+ DyDbgSize(cnt, *h_aop);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void dy_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br)
+{
+ struct au_dykey *key, *old;
+ struct hlist_bl_head *hbl;
+ struct op {
+ unsigned int sz;
+ void (*set)(struct au_dykey *key, const void *h_op,
+ struct super_block *h_sb __maybe_unused);
+ };
+ static const struct op a[] = {
+ [AuDy_AOP] = {
+ .sz = sizeof(struct au_dyaop),
+ .set = dy_aop
+ }
+ };
+ const struct op *p;
+
+ hbl = dynop + op->dy_type;
+ key = dy_gfind_get(hbl, op->dy_hop);
+ if (key)
+ goto out_add; /* success */
+
+ p = a + op->dy_type;
+ key = kzalloc(p->sz, GFP_NOFS);
+ if (unlikely(!key)) {
+ key = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ key->dk_op.dy_hop = op->dy_hop;
+ kref_init(&key->dk_kref);
+ p->set(key, op->dy_hop, au_br_sb(br));
+ old = dy_gadd(hbl, key);
+ if (old) {
+ au_kfree_rcu(key);
+ key = old;
+ }
+
+out_add:
+ old = dy_bradd(br, key);
+ if (old)
+ /* its ref-count should never be zero here */
+ kref_put(&key->dk_kref, dy_bug);
+out:
+ return key;
+}
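dy_get() above is a get-or-create lookup: find the key in the global hash and take a reference, otherwise allocate a new one, publish it, and fall back to whichever instance another task managed to publish first. A rough userspace analogue of that pattern, assuming a single mutex in place of hlist_bl locking and a plain integer in place of the kref; key_get() and struct key are illustrative names only.

	#include <pthread.h>
	#include <stdlib.h>

	struct key {
		struct key *next;
		const void *h_op;	/* identity, like dk_op.dy_hop */
		int refcnt;		/* stands in for dk_kref */
	};

	static struct key *key_list;
	static pthread_mutex_t key_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct key *key_get(const void *h_op)
	{
		struct key *k, *newk;

		/* first pass: reuse an existing key if there is one */
		pthread_mutex_lock(&key_lock);
		for (k = key_list; k; k = k->next)
			if (k->h_op == h_op) {
				k->refcnt++;
				pthread_mutex_unlock(&key_lock);
				return k;
			}
		pthread_mutex_unlock(&key_lock);

		/* allocate outside the lock, then recheck and publish */
		newk = calloc(1, sizeof(*newk));
		if (!newk)
			return NULL;
		newk->h_op = h_op;
		newk->refcnt = 1;

		pthread_mutex_lock(&key_lock);
		for (k = key_list; k; k = k->next)
			if (k->h_op == h_op) {	/* lost the race, reuse the winner */
				k->refcnt++;
				pthread_mutex_unlock(&key_lock);
				free(newk);
				return k;
			}
		newk->next = key_list;
		key_list = newk;
		pthread_mutex_unlock(&key_lock);
		return newk;
	}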
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Aufs prohibits O_DIRECT by default even if the branch supports it.
+ * This behaviour is necessary to return an error from open(O_DIRECT) instead
+ * of from the subsequent I/O. The dio mount option enables O_DIRECT and makes
+ * open(O_DIRECT) always succeed, but the subsequent I/O may still return an
+ * error. See the aufs manual for details.
+ */
+static void dy_adx(struct au_dyaop *dyaop, int do_dx)
+{
+ if (!do_dx)
+ dyaop->da_op.direct_IO = NULL;
+ else
+ dyaop->da_op.direct_IO = aufs_aop.direct_IO;
+}
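dy_aop() copies only the operations the lower filesystem actually implements into aufs's writable table, and dy_adx() then toggles the single direct_IO entry according to the dio mount option. A stripped-down sketch of this copy-and-override idea; struct my_ops and dyn_ops_init are invented for the example and are not aufs interfaces.

	#include <string.h>

	struct my_ops {				/* stand-in for address_space_operations */
		int (*readpage)(void *page);
		int (*writepage)(void *page);
		int (*direct_io)(void *iter);	/* optional, like direct_IO */
	};

	struct dyn_ops {
		struct my_ops ops;		/* writable copy, like au_dyaop.da_op */
	};

	/* copy what the lower ops provide, then gate one member on a mount option */
	static void dyn_ops_init(struct dyn_ops *d, const struct my_ops *lower, int dio)
	{
		memset(&d->ops, 0, sizeof(d->ops));
		if (lower->readpage)		/* mandatory in the real code (DySetAopForce) */
			d->ops.readpage = lower->readpage;
		if (lower->writepage)
			d->ops.writepage = lower->writepage;
		/* like dy_adx(): allow direct I/O only when the option says so */
		d->ops.direct_io = dio ? lower->direct_io : NULL;
	}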
+
+static struct au_dyaop *dy_aget(struct au_branch *br,
+ const struct address_space_operations *h_aop,
+ int do_dx)
+{
+ struct au_dyaop *dyaop;
+ struct au_dynop op;
+
+ op.dy_type = AuDy_AOP;
+ op.dy_haop = h_aop;
+ dyaop = (void *)dy_get(&op, br);
+ if (IS_ERR(dyaop))
+ goto out;
+ dy_adx(dyaop, do_dx);
+
+out:
+ return dyaop;
+}
+
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode)
+{
+ int err, do_dx;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_dyaop *dyaop;
+
+ AuDebugOn(!S_ISREG(h_inode->i_mode));
+ IiMustWriteLock(inode);
+
+ sb = inode->i_sb;
+ br = au_sbr(sb, bindex);
+ do_dx = !!au_opt_test(au_mntflags(sb), DIO);
+ dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx);
+ err = PTR_ERR(dyaop);
+ if (IS_ERR(dyaop))
+ /* unnecessary to call dy_fput() */
+ goto out;
+
+ err = 0;
+ inode->i_mapping->a_ops = &dyaop->da_op;
+
+out:
+ return err;
+}
+
+/*
+ * Is it safe to replace a_ops while the inode/file is in operation?
+ * Yes, I hope so.
+ */
+int au_dy_irefresh(struct inode *inode)
+{
+ int err;
+ aufs_bindex_t btop;
+ struct inode *h_inode;
+
+ err = 0;
+ if (S_ISREG(inode->i_mode)) {
+ btop = au_ibtop(inode);
+ h_inode = au_h_iptr(inode, btop);
+ err = au_dy_iaop(inode, btop, h_inode);
+ }
+ return err;
+}
+
+void au_dy_arefresh(int do_dx)
+{
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ struct au_dykey *key;
+
+ hbl = dynop + AuDy_AOP;
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(key, pos, hbl, dk_hnode)
+ dy_adx((void *)key, do_dx);
+ hlist_bl_unlock(hbl);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __init au_dy_init(void)
+{
+ int i;
+
+ /* make sure that 'struct au_dykey *' can be any type */
+ BUILD_BUG_ON(offsetof(struct au_dyaop, da_key));
+
+ for (i = 0; i < AuDyLast; i++)
+ INIT_HLIST_BL_HEAD(dynop + i);
+}
+
+void au_dy_fin(void)
+{
+ int i;
+
+ for (i = 0; i < AuDyLast; i++)
+ WARN_ON(!hlist_bl_empty(dynop + i));
+}
diff --git a/fs/aufs/dynop.h b/fs/aufs/dynop.h
new file mode 100644
index 000000000000..f3301c2a6a9b
--- /dev/null
+++ b/fs/aufs/dynop.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * dynamically customizable operations (for regular files only)
+ */
+
+#ifndef __AUFS_DYNOP_H__
+#define __AUFS_DYNOP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/kref.h>
+
+enum {AuDy_AOP, AuDyLast};
+
+struct au_dynop {
+ int dy_type;
+ union {
+ const void *dy_hop;
+ const struct address_space_operations *dy_haop;
+ };
+};
+
+struct au_dykey {
+ union {
+ struct hlist_bl_node dk_hnode;
+ struct rcu_head dk_rcu;
+ };
+ struct au_dynop dk_op;
+
+ /*
+	 * while this key is in the branch-local array, its kref is held.
+	 * when the branch is removed, the kref is put.
+ */
+ struct kref dk_kref;
+};
+
+/* do not union these, since their sizes differ too much from each other */
+struct au_dyaop {
+ struct au_dykey da_key;
+ struct address_space_operations da_op; /* not const */
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dynop.c */
+struct au_branch;
+void au_dy_put(struct au_dykey *key);
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode);
+int au_dy_irefresh(struct inode *inode);
+void au_dy_arefresh(int do_dio);
+
+void __init au_dy_init(void);
+void au_dy_fin(void);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DYNOP_H__ */
diff --git a/fs/aufs/export.c b/fs/aufs/export.c
new file mode 100644
index 000000000000..1da2ca0a969b
--- /dev/null
+++ b/fs/aufs/export.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * export via nfs
+ */
+
+#include <linux/exportfs.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/random.h>
+#include <linux/writeback.h>
+#include "aufs.h"
+
+union conv {
+#ifdef CONFIG_AUFS_INO_T_64
+ __u32 a[2];
+#else
+ __u32 a[1];
+#endif
+ ino_t ino;
+};
+
+static ino_t decode_ino(__u32 *a)
+{
+ union conv u;
+
+ BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a));
+ u.a[0] = a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+ u.a[1] = a[1];
+#endif
+ return u.ino;
+}
+
+static void encode_ino(__u32 *a, ino_t ino)
+{
+ union conv u;
+
+ u.ino = ino;
+ a[0] = u.a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+ a[1] = u.a[1];
+#endif
+}
+
+/* NFS file handle */
+enum {
+ Fh_br_id,
+ Fh_sigen,
+#ifdef CONFIG_AUFS_INO_T_64
+ /* support 64bit inode number */
+ Fh_ino1,
+ Fh_ino2,
+ Fh_dir_ino1,
+ Fh_dir_ino2,
+#else
+ Fh_ino1,
+ Fh_dir_ino1,
+#endif
+ Fh_igen,
+ Fh_h_type,
+ Fh_tail,
+
+ Fh_ino = Fh_ino1,
+ Fh_dir_ino = Fh_dir_ino1
+};
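Per this enum, the aufs part of the handle is a fixed header of Fh_tail 32-bit words (branch id, superblock generation, inode and parent-directory inode numbers, inode generation, lower handle type), and the lower filesystem's handle is appended right after it by aufs_encode_fh(). A small sketch of that layout, assuming the CONFIG_AUFS_INO_T_64 variant; struct aufs_fh_head and fh_ino are illustrative names, not part of aufs.

	#include <stdint.h>
	#include <string.h>

	struct aufs_fh_head {		/* one uint32_t per Fh_* slot */
		uint32_t br_id;
		uint32_t sigen;
		uint32_t ino[2];	/* Fh_ino1, Fh_ino2 */
		uint32_t dir_ino[2];	/* Fh_dir_ino1, Fh_dir_ino2 */
		uint32_t igen;
		uint32_t h_type;
		/* the lower fs's handle starts here, i.e. at fh + Fh_tail */
	};

	static uint64_t fh_ino(const uint32_t a[2])
	{
		uint64_t ino;

		/* same host-endian byte view as the union in decode_ino() */
		memcpy(&ino, a, sizeof(ino));
		return ino;
	}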
+
+static int au_test_anon(struct dentry *dentry)
+{
+ /* note: read d_flags without d_lock */
+ return !!(dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+int au_test_nfsd(void)
+{
+ int ret;
+ struct task_struct *tsk = current;
+ char comm[sizeof(tsk->comm)];
+
+ ret = 0;
+ if (tsk->flags & PF_KTHREAD) {
+ get_task_comm(comm, tsk);
+ ret = !strcmp(comm, "nfsd");
+ }
+
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+/* inode generation external table */
+
+void au_xigen_inc(struct inode *inode)
+{
+ loff_t pos;
+ ssize_t sz;
+ __u32 igen;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+ sb = inode->i_sb;
+ AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
+
+ sbinfo = au_sbi(sb);
+ pos = inode->i_ino;
+ pos *= sizeof(igen);
+ igen = inode->i_generation + 1;
+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen,
+ sizeof(igen), &pos);
+ if (sz == sizeof(igen))
+ return; /* success */
+
+ if (unlikely(sz >= 0))
+ AuIOErr("xigen error (%zd)\n", sz);
+}
+
+int au_xigen_new(struct inode *inode)
+{
+ int err;
+ loff_t pos;
+ ssize_t sz;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+
+ err = 0;
+ /* todo: dirty, at mount time */
+ if (inode->i_ino == AUFS_ROOT_INO)
+ goto out;
+ sb = inode->i_sb;
+ SiMustAnyLock(sb);
+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+ goto out;
+
+ err = -EFBIG;
+ pos = inode->i_ino;
+ if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) {
+ AuIOErr1("too large i%lld\n", pos);
+ goto out;
+ }
+ pos *= sizeof(inode->i_generation);
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ file = sbinfo->si_xigen;
+ BUG_ON(!file);
+
+ if (vfsub_f_size_read(file)
+ < pos + sizeof(inode->i_generation)) {
+ inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next);
+ sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation,
+ sizeof(inode->i_generation), &pos);
+ } else
+ sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation,
+ sizeof(inode->i_generation), &pos);
+ if (sz == sizeof(inode->i_generation))
+ goto out; /* success */
+
+ err = sz;
+ if (unlikely(sz >= 0)) {
+ err = -EIO;
+ AuIOErr("xigen error (%zd)\n", sz);
+ }
+
+out:
+ return err;
+}
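The xigen file is treated as a flat array of 32-bit generation numbers indexed by inode number: each record lives at offset ino * sizeof(__u32) and is read or rewritten through the xino helpers above. A userspace sketch of the same fixed-record scheme on a sparse file, using pread(2)/pwrite(2); igen_read, igen_write and the fd parameter are illustrative only.

	#include <stdint.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* read the stored generation for @ino; 0 on success */
	static int igen_read(int fd, ino_t ino, uint32_t *gen)
	{
		off_t pos = (off_t)ino * sizeof(*gen);
		ssize_t sz = pread(fd, gen, sizeof(*gen), pos);

		return sz == (ssize_t)sizeof(*gen) ? 0 : -1;
	}

	/* store a new generation for @ino at its fixed offset */
	static int igen_write(int fd, ino_t ino, uint32_t gen)
	{
		off_t pos = (off_t)ino * sizeof(gen);
		ssize_t sz = pwrite(fd, &gen, sizeof(gen), pos);

		return sz == (ssize_t)sizeof(gen) ? 0 : -1;
	}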
+
+int au_xigen_set(struct super_block *sb, struct path *path)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ file = au_xino_create2(sb, path, sbinfo->si_xigen);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ err = 0;
+ if (sbinfo->si_xigen)
+ fput(sbinfo->si_xigen);
+ sbinfo->si_xigen = file;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+void au_xigen_clr(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ if (sbinfo->si_xigen) {
+ fput(sbinfo->si_xigen);
+ sbinfo->si_xigen = NULL;
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
+ ino_t dir_ino)
+{
+ struct dentry *dentry, *d;
+ struct inode *inode;
+ unsigned int sigen;
+
+ dentry = NULL;
+ inode = ilookup(sb, ino);
+ if (!inode)
+ goto out;
+
+ dentry = ERR_PTR(-ESTALE);
+ sigen = au_sigen(sb);
+ if (unlikely(au_is_bad_inode(inode)
+ || IS_DEADDIR(inode)
+ || sigen != au_iigen(inode, NULL)))
+ goto out_iput;
+
+ dentry = NULL;
+ if (!dir_ino || S_ISDIR(inode->i_mode))
+ dentry = d_find_alias(inode);
+ else {
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&d->d_lock);
+ if (!au_test_anon(d)
+ && d_inode(d->d_parent)->i_ino == dir_ino) {
+ dentry = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ break;
+ }
+ spin_unlock(&d->d_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ if (unlikely(dentry && au_digen_test(dentry, sigen))) {
+ /* need to refresh */
+ dput(dentry);
+ dentry = NULL;
+ }
+
+out_iput:
+ iput(inode);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: dirty? */
+/* if exportfs_decode_fh() passed vfsmount*, we could be happy */
+
+struct au_compare_mnt_args {
+ /* input */
+ struct super_block *sb;
+
+ /* output */
+ struct vfsmount *mnt;
+};
+
+static int au_compare_mnt(struct vfsmount *mnt, void *arg)
+{
+ struct au_compare_mnt_args *a = arg;
+
+ if (mnt->mnt_sb != a->sb)
+ return 0;
+ a->mnt = mntget(mnt);
+ return 1;
+}
+
+static struct vfsmount *au_mnt_get(struct super_block *sb)
+{
+ int err;
+ struct path root;
+ struct au_compare_mnt_args args = {
+ .sb = sb
+ };
+
+ get_fs_root(current->fs, &root);
+ rcu_read_lock();
+ err = iterate_mounts(au_compare_mnt, &args, root.mnt);
+ rcu_read_unlock();
+ path_put(&root);
+ AuDebugOn(!err);
+ AuDebugOn(!args.mnt);
+ return args.mnt;
+}
+
+struct au_nfsd_si_lock {
+ unsigned int sigen;
+ aufs_bindex_t bindex, br_id;
+ unsigned char force_lock;
+};
+
+static int si_nfsd_read_lock(struct super_block *sb,
+ struct au_nfsd_si_lock *nsi_lock)
+{
+ int err;
+ aufs_bindex_t bindex;
+
+ si_read_lock(sb, AuLock_FLUSH);
+
+ /* branch id may be wrapped around */
+ err = 0;
+ bindex = au_br_index(sb, nsi_lock->br_id);
+ if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb))
+ goto out; /* success */
+
+ err = -ESTALE;
+ bindex = -1;
+ if (!nsi_lock->force_lock)
+ si_read_unlock(sb);
+
+out:
+ nsi_lock->bindex = bindex;
+ return err;
+}
+
+struct find_name_by_ino {
+ struct dir_context ctx;
+ int called, found;
+ ino_t ino;
+ char *name;
+ int namelen;
+};
+
+static int
+find_name_by_ino(struct dir_context *ctx, const char *name, int namelen,
+ loff_t offset, u64 ino, unsigned int d_type)
+{
+ struct find_name_by_ino *a = container_of(ctx, struct find_name_by_ino,
+ ctx);
+
+ a->called++;
+ if (a->ino != ino)
+ return 0;
+
+ memcpy(a->name, name, namelen);
+ a->namelen = namelen;
+ a->found = 1;
+ return 1;
+}
+
+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino,
+ struct au_nfsd_si_lock *nsi_lock)
+{
+ struct dentry *dentry, *parent;
+ struct file *file;
+ struct inode *dir;
+ struct find_name_by_ino arg = {
+ .ctx = {
+ .actor = find_name_by_ino
+ }
+ };
+ int err;
+
+ parent = path->dentry;
+ if (nsi_lock)
+ si_read_unlock(parent->d_sb);
+ file = vfsub_dentry_open(path, au_dir_roflags);
+ dentry = (void *)file;
+ if (IS_ERR(file))
+ goto out;
+
+ dentry = ERR_PTR(-ENOMEM);
+ arg.name = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!arg.name))
+ goto out_file;
+ arg.ino = ino;
+ arg.found = 0;
+ do {
+ arg.called = 0;
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(file, &arg.ctx);
+ } while (!err && !arg.found && arg.called);
+ dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_name;
+ /* instead of ENOENT */
+ dentry = ERR_PTR(-ESTALE);
+ if (!arg.found)
+ goto out_name;
+
+ /* do not call vfsub_lkup_one() */
+ dir = d_inode(parent);
+ dentry = vfsub_lookup_one_len_unlocked(arg.name, parent, arg.namelen);
+ AuTraceErrPtr(dentry);
+ if (IS_ERR(dentry))
+ goto out_name;
+ AuDebugOn(au_test_anon(dentry));
+ if (unlikely(d_really_is_negative(dentry))) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOENT);
+ }
+
+out_name:
+ free_page((unsigned long)arg.name);
+out_file:
+ fput(file);
+out:
+ if (unlikely(nsi_lock
+ && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0))
+ if (!IS_ERR(dentry)) {
+ dput(dentry);
+ dentry = ERR_PTR(-ESTALE);
+ }
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
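au_lkup_by_ino() recovers a child's name by iterating the parent directory until an entry's inode number matches, then performs a normal lookup on that name. A rough userspace analogue using opendir(3)/readdir(3); name_by_ino is an invented helper, and d_ino from readdir(3) is assumed to match the inode number being searched for.

	#include <dirent.h>
	#include <string.h>
	#include <sys/types.h>

	/*
	 * scan @dirpath for an entry whose inode number is @ino;
	 * on success copy its name into @name (of size @len) and return 0
	 */
	static int name_by_ino(const char *dirpath, ino_t ino, char *name, size_t len)
	{
		DIR *dir = opendir(dirpath);
		struct dirent *de;
		int err = -1;

		if (!dir)
			return -1;
		while ((de = readdir(dir))) {
			if ((ino_t)de->d_ino != ino)
				continue;
			if (strlen(de->d_name) + 1 > len)
				break;
			strcpy(name, de->d_name);
			err = 0;
			break;
		}
		closedir(dir);
		return err;
	}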
+
+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino,
+ ino_t dir_ino,
+ struct au_nfsd_si_lock *nsi_lock)
+{
+ struct dentry *dentry;
+ struct path path;
+
+ if (dir_ino != AUFS_ROOT_INO) {
+ path.dentry = decode_by_ino(sb, dir_ino, 0);
+ dentry = path.dentry;
+ if (!path.dentry || IS_ERR(path.dentry))
+ goto out;
+ AuDebugOn(au_test_anon(path.dentry));
+ } else
+ path.dentry = dget(sb->s_root);
+
+ path.mnt = au_mnt_get(sb);
+ dentry = au_lkup_by_ino(&path, ino, nsi_lock);
+ path_put(&path);
+
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int h_acceptable(void *expv, struct dentry *dentry)
+{
+ return 1;
+}
+
+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
+ char *buf, int len, struct super_block *sb)
+{
+ char *p;
+ int n;
+ struct path path;
+
+ p = d_path(h_rootpath, buf, len);
+ if (IS_ERR(p))
+ goto out;
+ n = strlen(p);
+
+ path.mnt = h_rootpath->mnt;
+ path.dentry = h_parent;
+ p = d_path(&path, buf, len);
+ if (IS_ERR(p))
+ goto out;
+ if (n != 1)
+ p += n;
+
+ path.mnt = au_mnt_get(sb);
+ path.dentry = sb->s_root;
+ p = d_path(&path, buf, len - strlen(p));
+ mntput(path.mnt);
+ if (IS_ERR(p))
+ goto out;
+ if (n != 1)
+ p[strlen(p)] = '/';
+
+out:
+ AuTraceErrPtr(p);
+ return p;
+}
+
+static
+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh,
+ int fh_len, struct au_nfsd_si_lock *nsi_lock)
+{
+ struct dentry *dentry, *h_parent, *root;
+ struct super_block *h_sb;
+ char *pathname, *p;
+ struct vfsmount *h_mnt;
+ struct au_branch *br;
+ int err;
+ struct path path;
+
+ br = au_sbr(sb, nsi_lock->bindex);
+ h_mnt = au_br_mnt(br);
+ h_sb = h_mnt->mnt_sb;
+ /* todo: call lower fh_to_dentry()? fh_to_parent()? */
+ lockdep_off();
+ h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail),
+ fh_len - Fh_tail, fh[Fh_h_type],
+ h_acceptable, /*context*/NULL);
+ lockdep_on();
+ dentry = h_parent;
+ if (unlikely(!h_parent || IS_ERR(h_parent))) {
+ AuWarn1("%s decode_fh failed, %ld\n",
+ au_sbtype(h_sb), PTR_ERR(h_parent));
+ goto out;
+ }
+ dentry = NULL;
+ if (unlikely(au_test_anon(h_parent))) {
+ AuWarn1("%s decode_fh returned a disconnected dentry\n",
+ au_sbtype(h_sb));
+ goto out_h_parent;
+ }
+
+ dentry = ERR_PTR(-ENOMEM);
+ pathname = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!pathname))
+ goto out_h_parent;
+
+ root = sb->s_root;
+ path.mnt = h_mnt;
+ di_read_lock_parent(root, !AuLock_IR);
+ path.dentry = au_h_dptr(root, nsi_lock->bindex);
+ di_read_unlock(root, !AuLock_IR);
+ p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
+ dentry = (void *)p;
+ if (IS_ERR(p))
+ goto out_pathname;
+
+ si_read_unlock(sb);
+ err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+ dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_relock;
+
+ dentry = ERR_PTR(-ENOENT);
+ AuDebugOn(au_test_anon(path.dentry));
+ if (unlikely(d_really_is_negative(path.dentry)))
+ goto out_path;
+
+ if (ino != d_inode(path.dentry)->i_ino)
+ dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
+ else
+ dentry = dget(path.dentry);
+
+out_path:
+ path_put(&path);
+out_relock:
+ if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
+ if (!IS_ERR(dentry)) {
+ dput(dentry);
+ dentry = ERR_PTR(-ESTALE);
+ }
+out_pathname:
+ free_page((unsigned long)pathname);
+out_h_parent:
+ dput(h_parent);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *
+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
+ int fh_type)
+{
+ struct dentry *dentry;
+ __u32 *fh = fid->raw;
+ struct au_branch *br;
+ ino_t ino, dir_ino;
+ struct au_nfsd_si_lock nsi_lock = {
+ .force_lock = 0
+ };
+
+ dentry = ERR_PTR(-ESTALE);
+ /* it should never happen, but the file handle is unreliable */
+ if (unlikely(fh_len < Fh_tail))
+ goto out;
+ nsi_lock.sigen = fh[Fh_sigen];
+ nsi_lock.br_id = fh[Fh_br_id];
+
+ /* branch id may be wrapped around */
+ br = NULL;
+ if (unlikely(si_nfsd_read_lock(sb, &nsi_lock)))
+ goto out;
+ nsi_lock.force_lock = 1;
+
+ /* is this inode still cached? */
+ ino = decode_ino(fh + Fh_ino);
+ /* it should never happen */
+ if (unlikely(ino == AUFS_ROOT_INO))
+ goto out_unlock;
+
+ dir_ino = decode_ino(fh + Fh_dir_ino);
+ dentry = decode_by_ino(sb, ino, dir_ino);
+ if (IS_ERR(dentry))
+ goto out_unlock;
+ if (dentry)
+ goto accept;
+
+ /* is the parent dir cached? */
+ br = au_sbr(sb, nsi_lock.bindex);
+ au_lcnt_inc(&br->br_nfiles);
+ dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock);
+ if (IS_ERR(dentry))
+ goto out_unlock;
+ if (dentry)
+ goto accept;
+
+ /* lookup path */
+ dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock);
+ if (IS_ERR(dentry))
+ goto out_unlock;
+ if (unlikely(!dentry))
+ /* todo?: make it ESTALE */
+ goto out_unlock;
+
+accept:
+ if (!au_digen_test(dentry, au_sigen(sb))
+ && d_inode(dentry)->i_generation == fh[Fh_igen])
+ goto out_unlock; /* success */
+
+ dput(dentry);
+ dentry = ERR_PTR(-ESTALE);
+out_unlock:
+ if (br)
+ au_lcnt_dec(&br->br_nfiles);
+ si_read_unlock(sb);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+#if 0 /* reserved for future use */
+/* support subtreecheck option */
+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ struct dentry *parent;
+ __u32 *fh = fid->raw;
+ ino_t dir_ino;
+
+ dir_ino = decode_ino(fh + Fh_dir_ino);
+ parent = decode_by_ino(sb, dir_ino, 0);
+ if (IS_ERR(parent))
+ goto out;
+ if (!parent)
+ parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]),
+ dir_ino, fh, fh_len);
+
+out:
+ AuTraceErrPtr(parent);
+ return parent;
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *dir)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct super_block *sb, *h_sb;
+ struct dentry *dentry, *parent, *h_parent;
+ struct inode *h_dir;
+ struct au_branch *br;
+
+ err = -ENOSPC;
+ if (unlikely(*max_len <= Fh_tail)) {
+ AuWarn1("NFSv2 client (max_len %d)?\n", *max_len);
+ goto out;
+ }
+
+ err = FILEID_ROOT;
+ if (inode->i_ino == AUFS_ROOT_INO) {
+ AuDebugOn(inode->i_ino != AUFS_ROOT_INO);
+ goto out;
+ }
+
+ h_parent = NULL;
+ sb = inode->i_sb;
+ err = si_read_lock(sb, AuLock_FLUSH);
+ if (unlikely(err))
+ goto out;
+
+#ifdef CONFIG_AUFS_DEBUG
+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+ AuWarn1("NFS-exporting requires xino\n");
+#endif
+ err = -EIO;
+ parent = NULL;
+ ii_read_lock_child(inode);
+ bindex = au_ibtop(inode);
+ if (!dir) {
+ dentry = d_find_any_alias(inode);
+ if (unlikely(!dentry))
+ goto out_unlock;
+ AuDebugOn(au_test_anon(dentry));
+ parent = dget_parent(dentry);
+ dput(dentry);
+ if (unlikely(!parent))
+ goto out_unlock;
+ if (d_really_is_positive(parent))
+ dir = d_inode(parent);
+ }
+
+ ii_read_lock_parent(dir);
+ h_dir = au_h_iptr(dir, bindex);
+ ii_read_unlock(dir);
+ if (unlikely(!h_dir))
+ goto out_parent;
+ h_parent = d_find_any_alias(h_dir);
+ if (unlikely(!h_parent))
+ goto out_hparent;
+
+ err = -EPERM;
+ br = au_sbr(sb, bindex);
+ h_sb = au_br_sb(br);
+ if (unlikely(!h_sb->s_export_op)) {
+ AuErr1("%s branch is not exportable\n", au_sbtype(h_sb));
+ goto out_hparent;
+ }
+
+ fh[Fh_br_id] = br->br_id;
+ fh[Fh_sigen] = au_sigen(sb);
+ encode_ino(fh + Fh_ino, inode->i_ino);
+ encode_ino(fh + Fh_dir_ino, dir->i_ino);
+ fh[Fh_igen] = inode->i_generation;
+
+ *max_len -= Fh_tail;
+ fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + Fh_tail),
+ max_len,
+ /*connectable or subtreecheck*/0);
+ err = fh[Fh_h_type];
+ *max_len += Fh_tail;
+ /* todo: macros? */
+ if (err != FILEID_INVALID)
+ err = 99;
+ else
+ AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb));
+
+out_hparent:
+ dput(h_parent);
+out_parent:
+ dput(parent);
+out_unlock:
+ ii_read_unlock(inode);
+ si_read_unlock(sb);
+out:
+ if (unlikely(err < 0))
+ err = FILEID_INVALID;
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_commit_metadata(struct inode *inode)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct super_block *sb;
+ struct inode *h_inode;
+ int (*f)(struct inode *inode);
+
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+ ii_write_lock_child(inode);
+ bindex = au_ibtop(inode);
+ AuDebugOn(bindex < 0);
+ h_inode = au_h_iptr(inode, bindex);
+
+ f = h_inode->i_sb->s_export_op->commit_metadata;
+ if (f)
+ err = f(h_inode);
+ else {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0 /* metadata only */
+ };
+
+ err = sync_inode(h_inode, &wbc);
+ }
+
+ au_cpup_attr_timesizes(inode);
+ ii_write_unlock(inode);
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct export_operations aufs_export_op = {
+ .fh_to_dentry = aufs_fh_to_dentry,
+ /* .fh_to_parent = aufs_fh_to_parent, */
+ .encode_fh = aufs_encode_fh,
+ .commit_metadata = aufs_commit_metadata
+};
+
+void au_export_init(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+ __u32 u;
+
+ BUILD_BUG_ON_MSG(IS_BUILTIN(CONFIG_AUFS_FS)
+ && IS_MODULE(CONFIG_EXPORTFS),
+ AUFS_NAME ": unsupported configuration "
+ "CONFIG_EXPORTFS=m and CONFIG_AUFS_FS=y");
+
+ sb->s_export_op = &aufs_export_op;
+ sbinfo = au_sbi(sb);
+ sbinfo->si_xigen = NULL;
+ get_random_bytes(&u, sizeof(u));
+ BUILD_BUG_ON(sizeof(u) != sizeof(int));
+ atomic_set(&sbinfo->si_xigen_next, u);
+}
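From userspace, these export_operations are exercised not only by nfsd but also by the file-handle syscalls, which go through encode_fh and fh_to_dentry. A short example of round-tripping a handle with name_to_handle_at(2)/open_by_handle_at(2); reopen_by_handle is an invented helper, and open_by_handle_at() additionally requires CAP_DAC_READ_SEARCH.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	/* obtain a handle for @path and re-open the file through it */
	static int reopen_by_handle(const char *path)
	{
		struct file_handle *fh;
		int mount_id, mount_fd, fd;

		fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
		if (!fh)
			return -1;
		fh->handle_bytes = MAX_HANDLE_SZ;
		if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) < 0) {
			perror("name_to_handle_at");	/* e.g. EOPNOTSUPP */
			free(fh);
			return -1;
		}
		/* any open fd on the same mount will do as the first argument */
		mount_fd = open(path, O_RDONLY);
		fd = mount_fd < 0 ? -1 : open_by_handle_at(mount_fd, fh, O_RDONLY);
		if (mount_fd >= 0)
			close(mount_fd);
		free(fh);
		return fd;	/* fails with ESTALE once the handle goes stale */
	}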
diff --git a/fs/aufs/f b/fs/aufs/f
new file mode 100644
index 000000000000..500a2ab32cb6
--- /dev/null
+++ b/fs/aufs/f
@@ -0,0 +1,298 @@
+--- /home/bruce/poky/build/tmp/work-shared/qemux86-64/kernel-source/fs/aufs/i_op.c 2017-12-23 23:06:53.241340968 -0500
++++ i_op.c 2017-12-23 23:14:58.070185032 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2005-2016 Junjiro R. Okajima
++ * Copyright (C) 2005-2017 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -31,12 +31,15 @@
+ int err;
+ const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+
++ err = -EPERM;
++ if (write_mask && IS_IMMUTABLE(h_inode))
++ goto out;
++
+ err = -EACCES;
+- if ((write_mask && IS_IMMUTABLE(h_inode))
+- || ((mask & MAY_EXEC)
+- && S_ISREG(h_inode->i_mode)
+- && (path_noexec(h_path)
+- || !(h_inode->i_mode & S_IXUGO))))
++ if (((mask & MAY_EXEC)
++ && S_ISREG(h_inode->i_mode)
++ && (path_noexec(h_path)
++ || !(h_inode->i_mode & S_IXUGO))))
+ goto out;
+
+ /*
+@@ -249,27 +252,28 @@
+ /* ---------------------------------------------------------------------- */
+
+ struct aopen_node {
+- struct hlist_node hlist;
++ struct hlist_bl_node hblist;
+ struct file *file, *h_file;
+ };
+
+ static int au_do_aopen(struct inode *inode, struct file *file)
+ {
+- struct au_sphlhead *aopen;
++ struct hlist_bl_head *aopen;
++ struct hlist_bl_node *pos;
+ struct aopen_node *node;
+ struct au_do_open_args args = {
+- .no_lock = 1,
+- .open = au_do_open_nondir
++ .aopen = 1,
++ .open = au_do_open_nondir
+ };
+
+ aopen = &au_sbi(inode->i_sb)->si_aopen;
+- spin_lock(&aopen->spin);
+- hlist_for_each_entry(node, &aopen->head, hlist)
++ hlist_bl_lock(aopen);
++ hlist_bl_for_each_entry(node, pos, aopen, hblist)
+ if (node->file == file) {
+ args.h_file = node->h_file;
+ break;
+ }
+- spin_unlock(&aopen->spin);
++ hlist_bl_unlock(aopen);
+ /* AuDebugOn(!args.h_file); */
+
+ return au_do_open(file, &args);
+@@ -279,10 +283,10 @@
+ struct file *file, unsigned int open_flag,
+ umode_t create_mode, int *opened)
+ {
+- int err, h_opened = *opened;
++ int err, unlocked, h_opened = *opened;
+ unsigned int lkup_flags;
+ struct dentry *parent, *d;
+- struct au_sphlhead *aopen;
++ struct hlist_bl_head *aopen;
+ struct vfsub_aopen_args args = {
+ .open_flag = open_flag,
+ .create_mode = create_mode,
+@@ -324,6 +328,7 @@
+ || !(open_flag & O_CREAT))
+ goto out_no_open;
+
++ unlocked = 0;
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+ if (unlikely(err))
+ goto out;
+@@ -354,6 +359,9 @@
+ put_filp(args.file);
+ goto out_unlock;
+ }
++ di_write_unlock(parent);
++ di_write_unlock(dentry);
++ unlocked = 1;
+
+ /* some filesystems don't set FILE_CREATED while succeeded? */
+ *opened |= FILE_CREATED;
+@@ -364,17 +372,21 @@
+ args.file = NULL;
+ }
+ aopen = &au_sbi(dir->i_sb)->si_aopen;
+- au_sphl_add(&aopen_node.hlist, aopen);
++ au_hbl_add(&aopen_node.hblist, aopen);
+ err = finish_open(file, dentry, au_do_aopen, opened);
+- au_sphl_del(&aopen_node.hlist, aopen);
++ au_hbl_del(&aopen_node.hblist, aopen);
+ AuTraceErr(err);
+ AuDbgFile(file);
+ if (aopen_node.h_file)
+ fput(aopen_node.h_file);
+
+ out_unlock:
+- di_write_unlock(parent);
+- aufs_read_unlock(dentry, AuLock_DW);
++ if (unlocked)
++ si_read_unlock(dentry->d_sb);
++ else {
++ di_write_unlock(parent);
++ aufs_read_unlock(dentry, AuLock_DW);
++ }
+ AuDbgDentry(dentry);
+ if (unlikely(err < 0))
+ goto out;
+@@ -420,10 +432,10 @@
+ if (!err && add_entry && !au_ftest_wrdir(add_entry, TMPFILE)) {
+ h_parent = au_h_dptr(parent, bcpup);
+ h_dir = d_inode(h_parent);
+- inode_lock_nested(h_dir, AuLsc_I_PARENT);
++ vfsub_inode_lock_shared_nested(h_dir, AuLsc_I_PARENT);
+ err = au_lkup_neg(dentry, bcpup, /*wh*/0);
+ /* todo: no unlock here */
+- inode_unlock(h_dir);
++ inode_unlock_shared(h_dir);
+
+ AuDbg("bcpup %d\n", bcpup);
+ if (!err) {
+@@ -807,10 +819,10 @@
+ a->h_path.dentry = au_h_dptr(dentry, btop);
+ a->h_inode = d_inode(a->h_path.dentry);
+ if (ia && (ia->ia_valid & ATTR_SIZE)) {
+- inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
++ vfsub_inode_lock_shared_nested(a->h_inode, AuLsc_I_CHILD);
+ if (ia->ia_size < i_size_read(a->h_inode))
+ sz = ia->ia_size;
+- inode_unlock(a->h_inode);
++ inode_unlock_shared(a->h_inode);
+ }
+
+ hi_wh = NULL;
+@@ -885,6 +897,10 @@
+ inode = d_inode(dentry);
+ IMustLock(inode);
+
++ err = setattr_prepare(dentry, ia);
++ if (unlikely(err))
++ goto out;
++
+ err = -ENOMEM;
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+@@ -903,7 +919,8 @@
+ /* currently ftruncate(2) only */
+ AuDebugOn(!d_is_reg(dentry));
+ file = ia->ia_file;
+- err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
++ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1,
++ /*fi_lsc*/0);
+ if (unlikely(err))
+ goto out_si;
+ ia->ia_file = au_hf_top(file);
+@@ -993,7 +1010,7 @@
+ out_si:
+ si_read_unlock(sb);
+ out_kfree:
+- au_delayed_kfree(a);
++ kfree(a);
+ out:
+ AuTraceErr(err);
+ return err;
+@@ -1028,8 +1045,8 @@
+ return err;
+ }
+
+-ssize_t au_srxattr(struct dentry *dentry, struct inode *inode,
+- struct au_srxattr *arg)
++ssize_t au_sxattr(struct dentry *dentry, struct inode *inode,
++ struct au_sxattr *arg)
+ {
+ int err;
+ struct path h_path;
+@@ -1063,13 +1080,11 @@
+ arg->u.set.name, arg->u.set.value,
+ arg->u.set.size, arg->u.set.flags);
+ break;
+- case AU_XATTR_REMOVE:
+- err = vfsub_removexattr(h_path.dentry, arg->u.remove.name);
+- break;
+ case AU_ACL_SET:
+ err = -EOPNOTSUPP;
+ h_inode = d_inode(h_path.dentry);
+ if (h_inode->i_op->set_acl)
++ /* this will call posix_acl_update_mode */
+ err = h_inode->i_op->set_acl(h_inode,
+ arg->u.acl_set.acl,
+ arg->u.acl_set.type);
+@@ -1086,7 +1101,7 @@
+ di_write_unlock(dentry);
+ si_read_unlock(sb);
+ out_kfree:
+- au_delayed_kfree(a);
++ kfree(a);
+ out:
+ AuTraceErr(err);
+ return err;
+@@ -1123,11 +1138,12 @@
+ }
+
+ /*
+- * common routine for aufs_getattr() and aufs_getxattr().
++ * common routine for aufs_getattr() and au_getxattr().
+ * returns zero or negative (an error).
+ * @dentry will be read-locked in success.
+ */
+-int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path)
++int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path,
++ int locked)
+ {
+ int err;
+ unsigned int mnt_flags, sigen;
+@@ -1144,6 +1160,9 @@
+ mnt_flags = au_mntflags(sb);
+ udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
+
++ if (unlikely(locked))
++ goto body; /* skip locking dinfo */
++
+ /* support fstat(2) */
+ if (!d_unlinked(dentry) && !udba_none) {
+ sigen = au_sigen(sb);
+@@ -1171,6 +1190,7 @@
+ } else
+ di_read_lock_child(dentry, AuLock_IR);
+
++body:
+ inode = d_inode(dentry);
+ bindex = au_ibtop(inode);
+ h_path->mnt = au_sbr_mnt(sb, bindex);
+@@ -1209,7 +1229,7 @@
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out;
+- err = au_h_path_getattr(dentry, /*force*/0, &h_path);
++ err = au_h_path_getattr(dentry, /*force*/0, &h_path, /*locked*/0);
+ if (unlikely(err))
+ goto out_si;
+ if (unlikely(!h_path.dentry))
+@@ -1290,7 +1310,7 @@
+ err = 0;
+ AuDbg("%pf\n", h_inode->i_op->get_link);
+ AuDbgDentry(h_dentry);
+- ret = h_inode->i_op->get_link(h_dentry, h_inode, done);
++ ret = vfs_get_link(h_dentry, done);
+ dput(h_dentry);
+ if (IS_ERR(ret))
+ err = PTR_ERR(ret);
+@@ -1384,10 +1404,7 @@
+ .getattr = aufs_getattr,
+
+ #ifdef CONFIG_AUFS_XATTR
+- .setxattr = aufs_setxattr,
+- .getxattr = aufs_getxattr,
+ .listxattr = aufs_listxattr,
+- .removexattr = aufs_removexattr,
+ #endif
+
+ .readlink = generic_readlink,
+@@ -1416,10 +1433,7 @@
+ .getattr = aufs_getattr,
+
+ #ifdef CONFIG_AUFS_XATTR
+- .setxattr = aufs_setxattr,
+- .getxattr = aufs_getxattr,
+ .listxattr = aufs_listxattr,
+- .removexattr = aufs_removexattr,
+ #endif
+
+ .update_time = aufs_update_time,
+@@ -1437,10 +1451,7 @@
+ .getattr = aufs_getattr,
+
+ #ifdef CONFIG_AUFS_XATTR
+- .setxattr = aufs_setxattr,
+- .getxattr = aufs_getxattr,
+ .listxattr = aufs_listxattr,
+- .removexattr = aufs_removexattr,
+ #endif
+
+ .update_time = aufs_update_time
diff --git a/fs/aufs/f_op.c b/fs/aufs/f_op.c
new file mode 100644
index 000000000000..5254b5cd3942
--- /dev/null
+++ b/fs/aufs/f_op.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * file and vm operations
+ */
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+#include <linux/mman.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+int au_do_open_nondir(struct file *file, int flags, struct file *h_file)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct dentry *dentry, *h_dentry;
+ struct au_finfo *finfo;
+ struct inode *h_inode;
+
+ FiMustWriteLock(file);
+
+ err = 0;
+ dentry = file->f_path.dentry;
+ AuDebugOn(IS_ERR_OR_NULL(dentry));
+ finfo = au_fi(file);
+ memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop));
+ atomic_set(&finfo->fi_mmapped, 0);
+ bindex = au_dbtop(dentry);
+ if (!h_file) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb);
+ if (unlikely(err))
+ goto out;
+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+ if (IS_ERR(h_file)) {
+ err = PTR_ERR(h_file);
+ goto out;
+ }
+ } else {
+ h_dentry = h_file->f_path.dentry;
+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb);
+ if (unlikely(err))
+ goto out;
+ /* br ref is already inc-ed */
+ }
+
+ if ((flags & __O_TMPFILE)
+ && !(flags & O_EXCL)) {
+ h_inode = file_inode(h_file);
+ spin_lock(&h_inode->i_lock);
+ h_inode->i_state |= I_LINKABLE;
+ spin_unlock(&h_inode->i_lock);
+ }
+ au_set_fbtop(file, bindex);
+ au_set_h_fptr(file, bindex, h_file);
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+
+out:
+ return err;
+}
+
+static int aufs_open_nondir(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ int err;
+ struct super_block *sb;
+ struct au_do_open_args args = {
+ .open = au_do_open_nondir
+ };
+
+ AuDbg("%pD, f_flags 0x%x, f_mode 0x%x\n",
+ file, vfsub_file_flags(file), file->f_mode);
+
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ err = au_do_open(file, &args);
+ si_read_unlock(sb);
+ return err;
+}
+
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file)
+{
+ struct au_finfo *finfo;
+ aufs_bindex_t bindex;
+
+ finfo = au_fi(file);
+ au_hbl_del(&finfo->fi_hlist,
+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
+ bindex = finfo->fi_btop;
+ if (bindex >= 0)
+ au_set_h_fptr(file, bindex, NULL);
+
+ au_finfo_fin(file);
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_nondir(struct file *file, fl_owner_t id)
+{
+ int err;
+ struct file *h_file;
+
+ err = 0;
+ h_file = au_hf_top(file);
+ if (h_file)
+ err = vfsub_flush(h_file, id);
+ return err;
+}
+
+static int aufs_flush_nondir(struct file *file, fl_owner_t id)
+{
+ return au_do_flush(file, id, au_do_flush_nondir);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * The read and write functions acquire [fdi]_rwsem once, but release them
+ * before mmap_sem, in order to avoid a race condition with mmap(2).
+ * Releasing these aufs rwsems should be safe: since si_rwsem is kept, no
+ * branch management and no harmful copy-up can happen. Actually a copy-up may
+ * happen in the read functions after [fdi]_rwsem are released, but it should
+ * be harmless.
+ */
+
+/* Callers should call au_read_post() or fput() in the end */
+struct file *au_read_pre(struct file *file, int keep_fi, unsigned int lsc)
+{
+ struct file *h_file;
+ int err;
+
+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0, lsc);
+ if (!err) {
+ di_read_unlock(file->f_path.dentry, AuLock_IR);
+ h_file = au_hf_top(file);
+ get_file(h_file);
+ if (!keep_fi)
+ fi_read_unlock(file);
+ } else
+ h_file = ERR_PTR(err);
+
+ return h_file;
+}
+
+static void au_read_post(struct inode *inode, struct file *h_file)
+{
+	/* update without lock; I don't think it is a problem */
+ fsstack_copy_attr_atime(inode, file_inode(h_file));
+ fput(h_file);
+}
+
+struct au_write_pre {
+ /* input */
+ unsigned int lsc;
+
+ /* output */
+ blkcnt_t blks;
+ aufs_bindex_t btop;
+};
+
+/*
+ * return with iinfo is write-locked
+ * callers should call au_write_post() or iinfo_write_unlock() + fput() in the
+ * end
+ */
+static struct file *au_write_pre(struct file *file, int do_ready,
+ struct au_write_pre *wpre)
+{
+ struct file *h_file;
+ struct dentry *dentry;
+ int err;
+ unsigned int lsc;
+ struct au_pin pin;
+
+ lsc = 0;
+ if (wpre)
+ lsc = wpre->lsc;
+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1, lsc);
+ h_file = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ dentry = file->f_path.dentry;
+ if (do_ready) {
+ err = au_ready_to_write(file, -1, &pin);
+ if (unlikely(err)) {
+ h_file = ERR_PTR(err);
+ di_write_unlock(dentry);
+ goto out_fi;
+ }
+ }
+
+ di_downgrade_lock(dentry, /*flags*/0);
+ if (wpre)
+ wpre->btop = au_fbtop(file);
+ h_file = au_hf_top(file);
+ get_file(h_file);
+ if (wpre)
+ wpre->blks = file_inode(h_file)->i_blocks;
+ if (do_ready)
+ au_unpin(&pin);
+ di_read_unlock(dentry, /*flags*/0);
+
+out_fi:
+ fi_write_unlock(file);
+out:
+ return h_file;
+}
+
+static void au_write_post(struct inode *inode, struct file *h_file,
+ struct au_write_pre *wpre, ssize_t written)
+{
+ struct inode *h_inode;
+
+ au_cpup_attr_timesizes(inode);
+ AuDebugOn(au_ibtop(inode) != wpre->btop);
+ h_inode = file_inode(h_file);
+ inode->i_mode = h_inode->i_mode;
+ ii_write_unlock(inode);
+ /* AuDbg("blks %llu, %llu\n", (u64)blks, (u64)h_inode->i_blocks); */
+ if (written > 0)
+ au_fhsm_wrote(inode->i_sb, wpre->btop,
+ /*force*/h_inode->i_blocks > wpre->blks);
+ fput(h_file);
+}
+
+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+ struct inode *inode;
+ struct file *h_file;
+ struct super_block *sb;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ /* the file data may be made obsolete by a concurrent copy-up, but that is harmless */
+ err = vfsub_read_u(h_file, buf, count, ppos);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ au_read_post(inode, h_file);
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+/*
+ * todo: very ugly
+ * safely acquires both i_mutex and si_rwsem for read.
+ * if the plink maintenance mode continues forever (which is the real
+ * problem), this may loop forever.
+ */
+static void au_mtx_and_read_lock(struct inode *inode)
+{
+ int err;
+ struct super_block *sb = inode->i_sb;
+
+ while (1) {
+ inode_lock(inode);
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (!err)
+ break;
+ inode_unlock(inode);
+ si_read_lock(sb, AuLock_NOPLMW);
+ si_read_unlock(sb);
+ }
+}
+
+static ssize_t aufs_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+ char __user *buf = (char __user *)ubuf;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ wpre.lsc = 0;
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = vfsub_write_u(h_file, buf, count, ppos);
+ au_write_post(inode, h_file, &wpre, err);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
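+/*
+ * invoke the branch fs's ->read_iter or ->write_iter with kio->ki_filp
+ * temporarily replaced by the branch file, and restore it afterwards.
+ */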
+static ssize_t au_do_iter(struct file *h_file, int rw, struct kiocb *kio,
+ struct iov_iter *iov_iter)
+{
+ ssize_t err;
+ struct file *file;
+ ssize_t (*iter)(struct kiocb *, struct iov_iter *);
+
+ err = security_file_permission(h_file, rw);
+ if (unlikely(err))
+ goto out;
+
+ err = -ENOSYS;
+ iter = NULL;
+ if (rw == MAY_READ)
+ iter = h_file->f_op->read_iter;
+ else if (rw == MAY_WRITE)
+ iter = h_file->f_op->write_iter;
+
+ file = kio->ki_filp;
+ kio->ki_filp = h_file;
+ if (iter) {
+ lockdep_off();
+ err = iter(kio, iov_iter);
+ lockdep_on();
+ } else
+ /* currently there is no such fs */
+ WARN_ON_ONCE(1);
+ kio->ki_filp = file;
+
+out:
+ return err;
+}
+
+static ssize_t aufs_read_iter(struct kiocb *kio, struct iov_iter *iov_iter)
+{
+ ssize_t err;
+ struct file *file, *h_file;
+ struct inode *inode;
+ struct super_block *sb;
+
+ file = kio->ki_filp;
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/1, /*lsc*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ if (au_test_loopback_kthread()) {
+ au_warn_loopback(h_file->f_path.dentry->d_sb);
+ if (file->f_mapping != h_file->f_mapping) {
+ file->f_mapping = h_file->f_mapping;
+ smp_mb(); /* unnecessary? */
+ }
+ }
+ fi_read_unlock(file);
+
+ err = au_do_iter(h_file, MAY_READ, kio, iov_iter);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ au_read_post(inode, h_file);
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+static ssize_t aufs_write_iter(struct kiocb *kio, struct iov_iter *iov_iter)
+{
+ ssize_t err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *file, *h_file;
+
+ file = kio->ki_filp;
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ wpre.lsc = 0;
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = au_do_iter(h_file, MAY_WRITE, kio, iov_iter);
+ au_write_post(inode, h_file, &wpre, err);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ ssize_t err;
+ struct file *h_file;
+ struct inode *inode;
+ struct super_block *sb;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = vfsub_splice_to(h_file, ppos, pipe, len, flags);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ au_read_post(inode, h_file);
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+static ssize_t
+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ ssize_t err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ wpre.lsc = 0;
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = vfsub_splice_from(pipe, h_file, ppos, len, flags);
+ au_write_post(inode, h_file, &wpre, err);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
+static long aufs_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len)
+{
+ long err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ wpre.lsc = 0;
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ lockdep_off();
+ err = vfs_fallocate(h_file, mode, offset, len);
+ lockdep_on();
+ au_write_post(inode, h_file, &wpre, /*written*/1);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
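+/*
+ * copy_file_range is forwarded only when both files live on the same lower
+ * fs; otherwise return -EXDEV. The fi locks are taken in dentry-address
+ * order to match di_write_lock2_{child,parent}().
+ */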
+static ssize_t aufs_copy_file_range(struct file *src, loff_t src_pos,
+ struct file *dst, loff_t dst_pos,
+ size_t len, unsigned int flags)
+{
+ ssize_t err;
+ struct au_write_pre wpre;
+ enum { SRC, DST };
+ struct {
+ struct inode *inode;
+ struct file *h_file;
+ struct super_block *h_sb;
+ } a[2];
+#define a_src a[SRC]
+#define a_dst a[DST]
+
+ err = -EINVAL;
+ a_src.inode = file_inode(src);
+ if (unlikely(!S_ISREG(a_src.inode->i_mode)))
+ goto out;
+ a_dst.inode = file_inode(dst);
+ if (unlikely(!S_ISREG(a_dst.inode->i_mode)))
+ goto out;
+
+ au_mtx_and_read_lock(a_dst.inode);
+ /*
+ * in order to match the order in di_write_lock2_{child,parent}(),
+ * use f_path.dentry for this comparison.
+ */
+ if (src->f_path.dentry < dst->f_path.dentry) {
+ a_src.h_file = au_read_pre(src, /*keep_fi*/1, AuLsc_FI_1);
+ err = PTR_ERR(a_src.h_file);
+ if (IS_ERR(a_src.h_file))
+ goto out_si;
+
+ wpre.lsc = AuLsc_FI_2;
+ a_dst.h_file = au_write_pre(dst, /*do_ready*/1, &wpre);
+ err = PTR_ERR(a_dst.h_file);
+ if (IS_ERR(a_dst.h_file)) {
+ au_read_post(a_src.inode, a_src.h_file);
+ goto out_si;
+ }
+ } else {
+ wpre.lsc = AuLsc_FI_1;
+ a_dst.h_file = au_write_pre(dst, /*do_ready*/1, &wpre);
+ err = PTR_ERR(a_dst.h_file);
+ if (IS_ERR(a_dst.h_file))
+ goto out_si;
+
+ a_src.h_file = au_read_pre(src, /*keep_fi*/1, AuLsc_FI_2);
+ err = PTR_ERR(a_src.h_file);
+ if (IS_ERR(a_src.h_file)) {
+ au_write_post(a_dst.inode, a_dst.h_file, &wpre,
+ /*written*/0);
+ goto out_si;
+ }
+ }
+
+ err = -EXDEV;
+ a_src.h_sb = file_inode(a_src.h_file)->i_sb;
+ a_dst.h_sb = file_inode(a_dst.h_file)->i_sb;
+ if (unlikely(a_src.h_sb != a_dst.h_sb)) {
+ AuDbgFile(src);
+ AuDbgFile(dst);
+ goto out_file;
+ }
+
+ err = vfsub_copy_file_range(a_src.h_file, src_pos, a_dst.h_file,
+ dst_pos, len, flags);
+
+out_file:
+ au_write_post(a_dst.inode, a_dst.h_file, &wpre, err);
+ fi_read_unlock(src);
+ au_read_post(a_src.inode, a_src.h_file);
+out_si:
+ si_read_unlock(a_dst.inode->i_sb);
+ inode_unlock(a_dst.inode);
+out:
+ return err;
+#undef a_src
+#undef a_dst
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * The locking order around current->mmap_sem.
+ * - in most (regular) cases
+ *   file I/O syscall -- aufs_read() or something
+ *	-- si_rwsem for read -- mmap_sem
+ *   (note that [fdi]i_rwsem are released before mmap_sem).
+ * - in the mmap case
+ *   mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem
+ * This AB-BA order is definitely bad, but it is not a problem since "si_rwsem
+ * for read" allows multiple processes to acquire it and [fdi]i_rwsem are not
+ * held during file I/O. Aufs needs to stop lockdep in aufs_mmap() though.
+ * It also means that when aufs acquires si_rwsem for write, the process must
+ * never acquire mmap_sem.
+ *
+ * Actually aufs_iterate() holds [fdi]i_rwsem before mmap_sem, but this is not
+ * a problem either since directories cannot be mmap-ed.
+ * The same reasoning applies to aufs_readlink() too.
+ */
+
+#if 0 /* stop calling security_file_mmap() */
+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */
+#define AuConv_VM_PROT(f, b) _calc_vm_trans(f, VM_##b, PROT_##b)
+
+static unsigned long au_arch_prot_conv(unsigned long flags)
+{
+ /* currently ppc64 only */
+#ifdef CONFIG_PPC64
+ /* cf. linux/arch/powerpc/include/asm/mman.h */
+ AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO);
+ return AuConv_VM_PROT(flags, SAO);
+#else
+ AuDebugOn(arch_calc_vm_prot_bits(-1));
+ return 0;
+#endif
+}
+
+static unsigned long au_prot_conv(unsigned long flags)
+{
+ return AuConv_VM_PROT(flags, READ)
+ | AuConv_VM_PROT(flags, WRITE)
+ | AuConv_VM_PROT(flags, EXEC)
+ | au_arch_prot_conv(flags);
+}
+
+/* cf. linux/include/linux/mman.h: calc_vm_flag_bits() */
+#define AuConv_VM_MAP(f, b) _calc_vm_trans(f, VM_##b, MAP_##b)
+
+static unsigned long au_flag_conv(unsigned long flags)
+{
+ return AuConv_VM_MAP(flags, GROWSDOWN)
+ | AuConv_VM_MAP(flags, DENYWRITE)
+ | AuConv_VM_MAP(flags, LOCKED);
+}
+#endif
+
+static int aufs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int err;
+ const unsigned char wlock
+ = (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED);
+ struct super_block *sb;
+ struct file *h_file;
+ struct inode *inode;
+
+ AuDbgVmRegion(file, vma);
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ lockdep_off();
+ si_read_lock(sb, AuLock_NOPLMW);
+
+ h_file = au_write_pre(file, wlock, /*wpre*/NULL);
+ lockdep_on();
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = 0;
+ au_set_mmapped(file);
+ au_vm_file_reset(vma, h_file);
+ /*
+ * we cannot call security_mmap_file() here since it may acquire
+ * mmap_sem or i_mutex.
+ *
+ * err = security_mmap_file(h_file, au_prot_conv(vma->vm_flags),
+ * au_flag_conv(vma->vm_flags));
+ */
+ if (!err)
+ err = call_mmap(h_file, vma);
+ if (!err) {
+ au_vm_prfile_set(vma, file);
+ fsstack_copy_attr_atime(inode, file_inode(h_file));
+ goto out_fput; /* success */
+ }
+ au_unset_mmapped(file);
+ au_vm_file_reset(vma, file);
+
+out_fput:
+ lockdep_off();
+ ii_write_unlock(inode);
+ lockdep_on();
+ fput(h_file);
+out:
+ lockdep_off();
+ si_read_unlock(sb);
+ lockdep_on();
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ int err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+
+ err = 0; /* -EBADF; */ /* posix? */
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ goto out;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ wpre.lsc = 0;
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out_unlock;
+
+ err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+ au_write_post(inode, h_file, &wpre, /*written*/0);
+
+out_unlock:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+out:
+ return err;
+}
+
+static int aufs_fasync(int fd, struct file *file, int flag)
+{
+ int err;
+ struct file *h_file;
+ struct super_block *sb;
+
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ if (h_file->f_op->fasync)
+ err = h_file->f_op->fasync(fd, h_file, flag);
+ fput(h_file); /* instead of au_read_post() */
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+static int aufs_setfl(struct file *file, unsigned long arg)
+{
+ int err;
+ struct file *h_file;
+ struct super_block *sb;
+
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ /* stop calling h_file->fasync */
+ arg |= vfsub_file_flags(file) & FASYNC;
+ err = setfl(/*unused fd*/-1, h_file, arg);
+ fput(h_file); /* instead of au_read_post() */
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no one supports this operation, currently */
+#if 0
+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset,
+ size_t len, loff_t *pos, int more)
+{
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_file_fop = {
+ .owner = THIS_MODULE,
+
+ .llseek = default_llseek,
+
+ .read = aufs_read,
+ .write = aufs_write,
+ .read_iter = aufs_read_iter,
+ .write_iter = aufs_write_iter,
+
+#ifdef CONFIG_AUFS_POLL
+ .poll = aufs_poll,
+#endif
+ .unlocked_ioctl = aufs_ioctl_nondir,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = aufs_compat_ioctl_nondir,
+#endif
+ .mmap = aufs_mmap,
+ .open = aufs_open_nondir,
+ .flush = aufs_flush_nondir,
+ .release = aufs_release_nondir,
+ .fsync = aufs_fsync_nondir,
+ .fasync = aufs_fasync,
+ /* .sendpage = aufs_sendpage, */
+ .setfl = aufs_setfl,
+ .splice_write = aufs_splice_write,
+ .splice_read = aufs_splice_read,
+#if 0
+ .aio_splice_write = aufs_aio_splice_write,
+ .aio_splice_read = aufs_aio_splice_read,
+#endif
+ .fallocate = aufs_fallocate,
+ .copy_file_range = aufs_copy_file_range
+};
diff --git a/fs/aufs/fhsm.c b/fs/aufs/fhsm.c
new file mode 100644
index 000000000000..f86a4c24fb26
--- /dev/null
+++ b/fs/aufs/fhsm.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * File-based Hierarchy Storage Management
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/poll.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
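+/* fhsm_bottom holds the index of the bottom FHSM branch; si_rwsem protects it */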
+static aufs_bindex_t au_fhsm_bottom(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ AuDebugOn(!fhsm);
+ return fhsm->fhsm_bottom;
+}
+
+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ AuDebugOn(!fhsm);
+ fhsm->fhsm_bottom = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_fhsm_test_jiffy(struct au_sbinfo *sbinfo, struct au_branch *br)
+{
+ struct au_br_fhsm *bf;
+
+ bf = br->br_fhsm;
+ MtxMustLock(&bf->bf_lock);
+
+ return !bf->bf_readable
+ || time_after(jiffies,
+ bf->bf_jiffy + sbinfo->si_fhsm.fhsm_expire);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_fhsm_notify(struct super_block *sb, int val)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ if (au_fhsm_pid(fhsm)
+ && atomic_read(&fhsm->fhsm_readable) != -1) {
+ atomic_set(&fhsm->fhsm_readable, val);
+ if (val)
+ wake_up(&fhsm->fhsm_wqh);
+ }
+}
+
+static int au_fhsm_stfs(struct super_block *sb, aufs_bindex_t bindex,
+ struct aufs_stfs *rstfs, int do_lock, int do_notify)
+{
+ int err;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ br = au_sbr(sb, bindex);
+ AuDebugOn(au_br_rdonly(br));
+ bf = br->br_fhsm;
+ AuDebugOn(!bf);
+
+ if (do_lock)
+ mutex_lock(&bf->bf_lock);
+ else
+ MtxMustLock(&bf->bf_lock);
+
+ /* sb->s_root for NFS is unreliable */
+ err = au_br_stfs(br, &bf->bf_stfs);
+ if (unlikely(err)) {
+ AuErr1("FHSM failed (%d), b%d, ignored.\n", err, bindex);
+ goto out;
+ }
+
+ bf->bf_jiffy = jiffies;
+ bf->bf_readable = 1;
+ if (do_notify)
+ au_fhsm_notify(sb, /*val*/1);
+ if (rstfs)
+ *rstfs = bf->bf_stfs;
+
+out:
+ if (do_lock)
+ mutex_unlock(&bf->bf_lock);
+ au_fhsm_notify(sb, /*val*/1);
+
+ return err;
+}
+
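+/*
+ * called after a write to branch @bindex: refresh its statfs info and wake
+ * the FHSM daemon when forced, when a daemon is registered, or when the
+ * cached data has expired.
+ */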
+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ AuDbg("b%d, force %d\n", bindex, force);
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ if (!au_ftest_si(sbinfo, FHSM)
+ || fhsm->fhsm_bottom == bindex)
+ return;
+
+ br = au_sbr(sb, bindex);
+ bf = br->br_fhsm;
+ AuDebugOn(!bf);
+ mutex_lock(&bf->bf_lock);
+ if (force
+ || au_fhsm_pid(fhsm)
+ || au_fhsm_test_jiffy(sbinfo, br))
+ err = au_fhsm_stfs(sb, bindex, /*rstfs*/NULL, /*do_lock*/0,
+ /*do_notify*/1);
+ mutex_unlock(&bf->bf_lock);
+}
+
+void au_fhsm_wrote_all(struct super_block *sb, int force)
+{
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+
+ /* exclude the bottom */
+ bbot = au_fhsm_bottom(sb);
+ for (bindex = 0; bindex < bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm))
+ au_fhsm_wrote(sb, bindex, force);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
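+/* ->poll for the FHSM fd: report EPOLLIN when fhsm_readable is set */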
+static __poll_t au_fhsm_poll(struct file *file, struct poll_table_struct *wait)
+{
+ __poll_t mask;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ mask = 0;
+ sbinfo = file->private_data;
+ fhsm = &sbinfo->si_fhsm;
+ poll_wait(file, &fhsm->fhsm_wqh, wait);
+ if (atomic_read(&fhsm->fhsm_readable))
+ mask = EPOLLIN /* | EPOLLRDNORM */;
+
+ if (!mask)
+ AuDbg("mask 0x%x\n", mask);
+ return mask;
+}
+
+static int au_fhsm_do_read_one(struct aufs_stbr __user *stbr,
+ struct aufs_stfs *stfs, __s16 brid)
+{
+ int err;
+
+ err = copy_to_user(&stbr->stfs, stfs, sizeof(*stfs));
+ if (!err)
+ err = __put_user(brid, &stbr->brid);
+ if (unlikely(err))
+ err = -EFAULT;
+
+ return err;
+}
+
+static ssize_t au_fhsm_do_read(struct super_block *sb,
+ struct aufs_stbr __user *stbr, size_t count)
+{
+ ssize_t err;
+ int nstbr;
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ /* except the bottom branch */
+ err = 0;
+ nstbr = 0;
+ bbot = au_fhsm_bottom(sb);
+ for (bindex = 0; !err && bindex < bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!au_br_fhsm(br->br_perm))
+ continue;
+
+ bf = br->br_fhsm;
+ mutex_lock(&bf->bf_lock);
+ if (bf->bf_readable) {
+ err = -EFAULT;
+ if (count >= sizeof(*stbr))
+ err = au_fhsm_do_read_one(stbr++, &bf->bf_stfs,
+ br->br_id);
+ if (!err) {
+ bf->bf_readable = 0;
+ count -= sizeof(*stbr);
+ nstbr++;
+ }
+ }
+ mutex_unlock(&bf->bf_lock);
+ }
+ if (!err)
+ err = sizeof(*stbr) * nstbr;
+
+ return err;
+}
+
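+/*
+ * ->read for the FHSM fd: block (unless O_NONBLOCK) until some branch has
+ * fresh statfs data, then copy an array of struct aufs_stbr to userspace.
+ */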
+static ssize_t au_fhsm_read(struct file *file, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ ssize_t err;
+ int readable;
+ aufs_bindex_t nfhsm, bindex, bbot;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+ struct au_branch *br;
+ struct super_block *sb;
+
+ err = 0;
+ sbinfo = file->private_data;
+ fhsm = &sbinfo->si_fhsm;
+need_data:
+ spin_lock_irq(&fhsm->fhsm_wqh.lock);
+ if (!atomic_read(&fhsm->fhsm_readable)) {
+ if (vfsub_file_flags(file) & O_NONBLOCK)
+ err = -EAGAIN;
+ else
+ err = wait_event_interruptible_locked_irq
+ (fhsm->fhsm_wqh,
+ atomic_read(&fhsm->fhsm_readable));
+ }
+ spin_unlock_irq(&fhsm->fhsm_wqh.lock);
+ if (unlikely(err))
+ goto out;
+
+ /* sb may already be dead */
+ au_rw_read_lock(&sbinfo->si_rwsem);
+ readable = atomic_read(&fhsm->fhsm_readable);
+ if (readable > 0) {
+ sb = sbinfo->si_sb;
+ AuDebugOn(!sb);
+ /* exclude the bottom branch */
+ nfhsm = 0;
+ bbot = au_fhsm_bottom(sb);
+ for (bindex = 0; bindex < bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm))
+ nfhsm++;
+ }
+ err = -EMSGSIZE;
+ if (nfhsm * sizeof(struct aufs_stbr) <= count) {
+ atomic_set(&fhsm->fhsm_readable, 0);
+ err = au_fhsm_do_read(sbinfo->si_sb, (void __user *)buf,
+ count);
+ }
+ }
+ au_rw_read_unlock(&sbinfo->si_rwsem);
+ if (!readable)
+ goto need_data;
+
+out:
+ return err;
+}
+
+static int au_fhsm_release(struct inode *inode, struct file *file)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ /* sb may already be dead */
+ sbinfo = file->private_data;
+ fhsm = &sbinfo->si_fhsm;
+ spin_lock(&fhsm->fhsm_spin);
+ fhsm->fhsm_pid = 0;
+ spin_unlock(&fhsm->fhsm_spin);
+ kobject_put(&sbinfo->si_kobj);
+
+ return 0;
+}
+
+static const struct file_operations au_fhsm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .read = au_fhsm_read,
+ .poll = au_fhsm_poll,
+ .release = au_fhsm_release
+};
+
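+/*
+ * create the anonymous fd for the FHSM daemon; only a single reader is
+ * allowed at a time (recorded in fhsm_pid).
+ */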
+int au_fhsm_fd(struct super_block *sb, int oflags)
+{
+ int err, fd;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ err = -EPERM;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = -EINVAL;
+ if (unlikely(oflags & ~(O_CLOEXEC | O_NONBLOCK)))
+ goto out;
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ spin_lock(&fhsm->fhsm_spin);
+ if (!fhsm->fhsm_pid)
+ fhsm->fhsm_pid = current->pid;
+ else
+ err = -EBUSY;
+ spin_unlock(&fhsm->fhsm_spin);
+ if (unlikely(err))
+ goto out;
+
+ oflags |= O_RDONLY;
+ /* oflags |= FMODE_NONOTIFY; */
+ fd = anon_inode_getfd("[aufs_fhsm]", &au_fhsm_fops, sbinfo, oflags);
+ err = fd;
+ if (unlikely(fd < 0))
+ goto out_pid;
+
+ /* succeed regardless of the 'fhsm' status */
+ kobject_get(&sbinfo->si_kobj);
+ si_noflush_read_lock(sb);
+ if (au_ftest_si(sbinfo, FHSM))
+ au_fhsm_wrote_all(sb, /*force*/0);
+ si_read_unlock(sb);
+ goto out; /* success */
+
+out_pid:
+ spin_lock(&fhsm->fhsm_spin);
+ fhsm->fhsm_pid = 0;
+ spin_unlock(&fhsm->fhsm_spin);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_fhsm_br_alloc(struct au_branch *br)
+{
+ int err;
+
+ err = 0;
+ br->br_fhsm = kmalloc(sizeof(*br->br_fhsm), GFP_NOFS);
+ if (br->br_fhsm)
+ au_br_fhsm_init(br->br_fhsm);
+ else
+ err = -ENOMEM;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_fhsm_fin(struct super_block *sb)
+{
+ au_fhsm_notify(sb, /*val*/-1);
+}
+
+void au_fhsm_init(struct au_sbinfo *sbinfo)
+{
+ struct au_fhsm *fhsm;
+
+ fhsm = &sbinfo->si_fhsm;
+ spin_lock_init(&fhsm->fhsm_spin);
+ init_waitqueue_head(&fhsm->fhsm_wqh);
+ atomic_set(&fhsm->fhsm_readable, 0);
+ fhsm->fhsm_expire
+ = msecs_to_jiffies(AUFS_FHSM_CACHE_DEF_SEC * MSEC_PER_SEC);
+ fhsm->fhsm_bottom = -1;
+}
+
+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec)
+{
+ sbinfo->si_fhsm.fhsm_expire
+ = msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo)
+{
+ unsigned int u;
+
+ if (!au_ftest_si(sbinfo, FHSM))
+ return;
+
+ u = jiffies_to_msecs(sbinfo->si_fhsm.fhsm_expire) / MSEC_PER_SEC;
+ if (u != AUFS_FHSM_CACHE_DEF_SEC)
+ seq_printf(seq, ",fhsm_sec=%u", u);
+}
diff --git a/fs/aufs/file.c b/fs/aufs/file.c
new file mode 100644
index 000000000000..dbb33855d226
--- /dev/null
+++ b/fs/aufs/file.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * handling file/dir, and address_space operation
+ */
+
+#ifdef CONFIG_AUFS_DEBUG
+#include <linux/migrate.h>
+#endif
+#include <linux/pagemap.h>
+#include "aufs.h"
+
+/* drop flags for writing */
+unsigned int au_file_roflags(unsigned int flags)
+{
+ flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC);
+ flags |= O_RDONLY | O_NOATIME;
+ return flags;
+}
+
+/* functions common to regular files and dirs */
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+ struct file *file, int force_wr)
+{
+ struct file *h_file;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct path h_path;
+ int err;
+
+ /* a race condition can happen between open and unlink/rmdir */
+ h_file = ERR_PTR(-ENOENT);
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (au_test_nfsd() && (!h_dentry || d_is_negative(h_dentry)))
+ goto out;
+ h_inode = d_inode(h_dentry);
+ spin_lock(&h_dentry->d_lock);
+ err = (!d_unhashed(dentry) && d_unlinked(h_dentry))
+ /* || !d_inode(dentry)->i_nlink */
+ ;
+ spin_unlock(&h_dentry->d_lock);
+ if (unlikely(err))
+ goto out;
+
+ sb = dentry->d_sb;
+ br = au_sbr(sb, bindex);
+ err = au_br_test_oflag(flags, br);
+ h_file = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ /* drop flags for writing */
+ if (au_test_ro(sb, bindex, d_inode(dentry))) {
+ if (force_wr && !(flags & O_WRONLY))
+ force_wr = 0;
+ flags = au_file_roflags(flags);
+ if (force_wr) {
+ h_file = ERR_PTR(-EROFS);
+ flags = au_file_roflags(flags);
+ if (unlikely(vfsub_native_ro(h_inode)
+ || IS_APPEND(h_inode)))
+ goto out;
+ flags &= ~O_ACCMODE;
+ flags |= O_WRONLY;
+ }
+ }
+ flags &= ~O_CREAT;
+ au_lcnt_inc(&br->br_nfiles);
+ h_path.dentry = h_dentry;
+ h_path.mnt = au_br_mnt(br);
+ h_file = vfsub_dentry_open(&h_path, flags);
+ if (IS_ERR(h_file))
+ goto out_br;
+
+ if (flags & __FMODE_EXEC) {
+ err = deny_write_access(h_file);
+ if (unlikely(err)) {
+ fput(h_file);
+ h_file = ERR_PTR(err);
+ goto out_br;
+ }
+ }
+ fsnotify_open(h_file);
+ goto out; /* success */
+
+out_br:
+ au_lcnt_dec(&br->br_nfiles);
+out:
+ return h_file;
+}
+
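+/*
+ * "copy-up/move on open": when the top branch carries a CMOO attribute,
+ * copy the file up to an upper writable branch; for "move", also unlink the
+ * original on the source branch. Skipped for the FHSM daemon and its child.
+ */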
+static int au_cmoo(struct dentry *dentry)
+{
+ int err, cmoo, matched;
+ unsigned int udba;
+ struct path h_path;
+ struct au_pin pin;
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = -1,
+ .bsrc = -1,
+ .len = -1,
+ .pin = &pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+ struct inode *delegated;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+ pid_t pid;
+ struct au_branch *br;
+ struct dentry *parent;
+ struct au_hinode *hdir;
+
+ DiMustWriteLock(dentry);
+ IiMustWriteLock(d_inode(dentry));
+
+ err = 0;
+ if (IS_ROOT(dentry))
+ goto out;
+ cpg.bsrc = au_dbtop(dentry);
+ if (!cpg.bsrc)
+ goto out;
+
+ sb = dentry->d_sb;
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ pid = au_fhsm_pid(fhsm);
+ rcu_read_lock();
+ matched = (pid
+ && (current->pid == pid
+ || rcu_dereference(current->real_parent)->pid == pid));
+ rcu_read_unlock();
+ if (matched)
+ goto out;
+
+ br = au_sbr(sb, cpg.bsrc);
+ cmoo = au_br_cmoo(br->br_perm);
+ if (!cmoo)
+ goto out;
+ if (!d_is_reg(dentry))
+ cmoo &= AuBrAttr_COO_ALL;
+ if (!cmoo)
+ goto out;
+
+ parent = dget_parent(dentry);
+ di_write_lock_parent(parent);
+ err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1);
+ cpg.bdst = err;
+ if (unlikely(err < 0)) {
+ err = 0; /* there is no upper writable branch */
+ goto out_dgrade;
+ }
+ AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst);
+
+ /* do not respect the coo attrib for the target branch */
+ err = au_cpup_dirs(dentry, cpg.bdst);
+ if (unlikely(err))
+ goto out_dgrade;
+
+ di_downgrade_lock(parent, AuLock_IR);
+ udba = au_opt_udba(sb);
+ err = au_pin(&pin, dentry, cpg.bdst, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_parent;
+
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&pin);
+ if (unlikely(err))
+ goto out_parent;
+ if (!(cmoo & AuBrWAttr_MOO))
+ goto out_parent; /* success */
+
+ err = au_pin(&pin, dentry, cpg.bsrc, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_parent;
+
+ h_path.mnt = au_br_mnt(br);
+ h_path.dentry = au_h_dptr(dentry, cpg.bsrc);
+ hdir = au_hi(d_inode(parent), cpg.bsrc);
+ delegated = NULL;
+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1);
+ au_unpin(&pin);
+ /* todo: keep h_dentry or not? */
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err)) {
+ pr_err("unlink %pd after coo failed (%d), ignored\n",
+ dentry, err);
+ err = 0;
+ }
+ goto out_parent; /* success */
+
+out_dgrade:
+ di_downgrade_lock(parent, AuLock_IR);
+out_parent:
+ di_read_unlock(parent, AuLock_IR);
+ dput(parent);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
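+/*
+ * common open path for regular files and dirs: initialize finfo, perform the
+ * CMOO copy-up if required, call the type-specific opener from @args, and
+ * register the file on the sbinfo file list.
+ */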
+int au_do_open(struct file *file, struct au_do_open_args *args)
+{
+ int err, aopen = args->aopen;
+ struct dentry *dentry;
+ struct au_finfo *finfo;
+
+ if (!aopen)
+ err = au_finfo_init(file, args->fidir);
+ else {
+ lockdep_off();
+ err = au_finfo_init(file, args->fidir);
+ lockdep_on();
+ }
+ if (unlikely(err))
+ goto out;
+
+ dentry = file->f_path.dentry;
+ AuDebugOn(IS_ERR_OR_NULL(dentry));
+ di_write_lock_child(dentry);
+ err = au_cmoo(dentry);
+ di_downgrade_lock(dentry, AuLock_IR);
+ if (!err) {
+ if (!aopen)
+ err = args->open(file, vfsub_file_flags(file), NULL);
+ else {
+ lockdep_off();
+ err = args->open(file, vfsub_file_flags(file),
+ args->h_file);
+ lockdep_on();
+ }
+ }
+ di_read_unlock(dentry, AuLock_IR);
+
+ finfo = au_fi(file);
+ if (!err) {
+ finfo->fi_file = file;
+ au_hbl_add(&finfo->fi_hlist,
+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
+ }
+ if (!aopen)
+ fi_write_unlock(file);
+ else {
+ lockdep_off();
+ fi_write_unlock(file);
+ lockdep_on();
+ }
+ if (unlikely(err)) {
+ finfo->fi_hdir = NULL;
+ au_finfo_fin(file);
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_reopen_nondir(struct file *file)
+{
+ int err;
+ aufs_bindex_t btop;
+ struct dentry *dentry;
+ struct au_branch *br;
+ struct file *h_file, *h_file_tmp;
+
+ dentry = file->f_path.dentry;
+ btop = au_dbtop(dentry);
+ br = au_sbr(dentry->d_sb, btop);
+ h_file_tmp = NULL;
+ if (au_fbtop(file) == btop) {
+ h_file = au_hf_top(file);
+ if (file->f_mode == h_file->f_mode)
+ return 0; /* success */
+ h_file_tmp = h_file;
+ get_file(h_file_tmp);
+ au_lcnt_inc(&br->br_nfiles);
+ au_set_h_fptr(file, btop, NULL);
+ }
+ AuDebugOn(au_fi(file)->fi_hdir);
+ /*
+ * this can happen:
+ * the file exists on both the rw and the ro branch
+ * open --> dbtop and fbtop are both 0
+ * prepend a new branch as rw, the old "rw" becomes ro
+ * remove rw/file
+ * delete the top branch, "rw" becomes rw again
+ *	--> dbtop is 1, fbtop is still 0
+ * write --> fbtop is 0 but dbtop is 1
+ */
+ /* AuDebugOn(au_fbtop(file) < btop); */
+
+ h_file = au_h_open(dentry, btop, vfsub_file_flags(file) & ~O_TRUNC,
+ file, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file)) {
+ if (h_file_tmp) {
+ /* revert */
+ au_set_h_fptr(file, btop, h_file_tmp);
+ h_file_tmp = NULL;
+ }
+ goto out; /* todo: close all? */
+ }
+
+ err = 0;
+ au_set_fbtop(file, btop);
+ au_set_h_fptr(file, btop, h_file);
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+
+out:
+ if (h_file_tmp) {
+ fput(h_file_tmp);
+ au_lcnt_dec(&br->br_nfiles);
+ }
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
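+/*
+ * temporarily install @hi_wh as the top hidden dentry and reopen the file on
+ * the target branch, then restore the original dinfo.
+ */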
+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
+ struct dentry *hi_wh)
+{
+ int err;
+ aufs_bindex_t btop;
+ struct au_dinfo *dinfo;
+ struct dentry *h_dentry;
+ struct au_hdentry *hdp;
+
+ dinfo = au_di(file->f_path.dentry);
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ btop = dinfo->di_btop;
+ dinfo->di_btop = btgt;
+ hdp = au_hdentry(dinfo, btgt);
+ h_dentry = hdp->hd_dentry;
+ hdp->hd_dentry = hi_wh;
+ err = au_reopen_nondir(file);
+ hdp->hd_dentry = h_dentry;
+ dinfo->di_btop = btop;
+
+ return err;
+}
+
+static int au_ready_to_write_wh(struct file *file, loff_t len,
+ aufs_bindex_t bcpup, struct au_pin *pin)
+{
+ int err;
+ struct inode *inode, *h_inode;
+ struct dentry *h_dentry, *hi_wh;
+ struct au_cp_generic cpg = {
+ .dentry = file->f_path.dentry,
+ .bdst = bcpup,
+ .bsrc = -1,
+ .len = len,
+ .pin = pin
+ };
+
+ au_update_dbtop(cpg.dentry);
+ inode = d_inode(cpg.dentry);
+ h_inode = NULL;
+ if (au_dbtop(cpg.dentry) <= bcpup
+ && au_dbbot(cpg.dentry) >= bcpup) {
+ h_dentry = au_h_dptr(cpg.dentry, bcpup);
+ if (h_dentry && d_is_positive(h_dentry))
+ h_inode = d_inode(h_dentry);
+ }
+ hi_wh = au_hi_wh(inode, bcpup);
+ if (!hi_wh && !h_inode)
+ err = au_sio_cpup_wh(&cpg, file);
+ else
+ /* already copied-up after unlink */
+ err = au_reopen_wh(file, bcpup, hi_wh);
+
+ if (!err
+ && (inode->i_nlink > 1
+ || (inode->i_state & I_LINKABLE))
+ && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK))
+ au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup));
+
+ return err;
+}
+
+/*
+ * prepare the @file for writing.
+ */
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
+{
+ int err;
+ aufs_bindex_t dbtop;
+ struct dentry *parent;
+ struct inode *inode;
+ struct super_block *sb;
+ struct file *h_file;
+ struct au_cp_generic cpg = {
+ .dentry = file->f_path.dentry,
+ .bdst = -1,
+ .bsrc = -1,
+ .len = len,
+ .pin = pin,
+ .flags = AuCpup_DTIME
+ };
+
+ sb = cpg.dentry->d_sb;
+ inode = d_inode(cpg.dentry);
+ cpg.bsrc = au_fbtop(file);
+ err = au_test_ro(sb, cpg.bsrc, inode);
+ if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) {
+ err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE,
+ /*flags*/0);
+ goto out;
+ }
+
+ /* need to cpup or reopen */
+ parent = dget_parent(cpg.dentry);
+ di_write_lock_parent(parent);
+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+ cpg.bdst = err;
+ if (unlikely(err < 0))
+ goto out_dgrade;
+ err = 0;
+
+ if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) {
+ err = au_cpup_dirs(cpg.dentry, cpg.bdst);
+ if (unlikely(err))
+ goto out_dgrade;
+ }
+
+ err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_dgrade;
+
+ dbtop = au_dbtop(cpg.dentry);
+ if (dbtop <= cpg.bdst)
+ cpg.bsrc = cpg.bdst;
+
+ if (dbtop <= cpg.bdst /* just reopen */
+ || !d_unhashed(cpg.dentry) /* copyup and reopen */
+ ) {
+ h_file = au_h_open_pre(cpg.dentry, cpg.bsrc, /*force_wr*/0);
+ if (IS_ERR(h_file))
+ err = PTR_ERR(h_file);
+ else {
+ di_downgrade_lock(parent, AuLock_IR);
+ if (dbtop > cpg.bdst)
+ err = au_sio_cpup_simple(&cpg);
+ if (!err)
+ err = au_reopen_nondir(file);
+ au_h_open_post(cpg.dentry, cpg.bsrc, h_file);
+ }
+ } else { /* copyup as wh and reopen */
+ /*
+ * since writable hfsplus branch is not supported,
+ * h_open_pre/post() are unnecessary.
+ */
+ err = au_ready_to_write_wh(file, len, cpg.bdst, pin);
+ di_downgrade_lock(parent, AuLock_IR);
+ }
+
+ if (!err) {
+ au_pin_set_parent_lflag(pin, /*lflag*/0);
+ goto out_dput; /* success */
+ }
+ au_unpin(pin);
+ goto out_unlock;
+
+out_dgrade:
+ di_downgrade_lock(parent, AuLock_IR);
+out_unlock:
+ di_read_unlock(parent, AuLock_IR);
+out_dput:
+ dput(parent);
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_do_flush(struct file *file, fl_owner_t id,
+ int (*flush)(struct file *file, fl_owner_t id))
+{
+ int err;
+ struct super_block *sb;
+ struct inode *inode;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_noflush_read_lock(sb);
+ fi_read_lock(file);
+ ii_read_lock_child(inode);
+
+ err = flush(file, id);
+ au_cpup_attr_timesizes(inode);
+
+ ii_read_unlock(inode);
+ fi_read_unlock(file);
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
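+/*
+ * called from refresh_file(): when the inode's top branch differs from the
+ * file's, copy the file up (pseudo-link case) or reopen it via the already
+ * copied-up whiteout'd entry.
+ */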
+static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
+{
+ int err;
+ struct au_pin pin;
+ struct au_finfo *finfo;
+ struct dentry *parent, *hi_wh;
+ struct inode *inode;
+ struct super_block *sb;
+ struct au_cp_generic cpg = {
+ .dentry = file->f_path.dentry,
+ .bdst = -1,
+ .bsrc = -1,
+ .len = -1,
+ .pin = &pin,
+ .flags = AuCpup_DTIME
+ };
+
+ FiMustWriteLock(file);
+
+ err = 0;
+ finfo = au_fi(file);
+ sb = cpg.dentry->d_sb;
+ inode = d_inode(cpg.dentry);
+ cpg.bdst = au_ibtop(inode);
+ if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry))
+ goto out;
+
+ parent = dget_parent(cpg.dentry);
+ if (au_test_ro(sb, cpg.bdst, inode)) {
+ di_read_lock_parent(parent, !AuLock_IR);
+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+ cpg.bdst = err;
+ di_read_unlock(parent, !AuLock_IR);
+ if (unlikely(err < 0))
+ goto out_parent;
+ err = 0;
+ }
+
+ di_read_lock_parent(parent, AuLock_IR);
+ hi_wh = au_hi_wh(inode, cpg.bdst);
+ if (!S_ISDIR(inode->i_mode)
+ && au_opt_test(au_mntflags(sb), PLINK)
+ && au_plink_test(inode)
+ && !d_unhashed(cpg.dentry)
+ && cpg.bdst < au_dbtop(cpg.dentry)) {
+ err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst);
+ if (unlikely(err))
+ goto out_unlock;
+
+ /* always superio. */
+ err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (!err) {
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&pin);
+ }
+ } else if (hi_wh) {
+ /* already copied-up after unlink */
+ err = au_reopen_wh(file, cpg.bdst, hi_wh);
+ *need_reopen = 0;
+ }
+
+out_unlock:
+ di_read_unlock(parent, AuLock_IR);
+out_parent:
+ dput(parent);
+out:
+ return err;
+}
+
+static void au_do_refresh_dir(struct file *file)
+{
+ aufs_bindex_t bindex, bbot, new_bindex, brid;
+ struct au_hfile *p, tmp, *q;
+ struct au_finfo *finfo;
+ struct super_block *sb;
+ struct au_fidir *fidir;
+
+ FiMustWriteLock(file);
+
+ sb = file->f_path.dentry->d_sb;
+ finfo = au_fi(file);
+ fidir = finfo->fi_hdir;
+ AuDebugOn(!fidir);
+ p = fidir->fd_hfile + finfo->fi_btop;
+ brid = p->hf_br->br_id;
+ bbot = fidir->fd_bbot;
+ for (bindex = finfo->fi_btop; bindex <= bbot; bindex++, p++) {
+ if (!p->hf_file)
+ continue;
+
+ new_bindex = au_br_index(sb, p->hf_br->br_id);
+ if (new_bindex == bindex)
+ continue;
+ if (new_bindex < 0) {
+ au_set_h_fptr(file, bindex, NULL);
+ continue;
+ }
+
+ /* swap the two lower file entries, and loop again */
+ q = fidir->fd_hfile + new_bindex;
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ if (tmp.hf_file) {
+ bindex--;
+ p--;
+ }
+ }
+
+ p = fidir->fd_hfile;
+ if (!au_test_mmapped(file) && !d_unlinked(file->f_path.dentry)) {
+ bbot = au_sbbot(sb);
+ for (finfo->fi_btop = 0; finfo->fi_btop <= bbot;
+ finfo->fi_btop++, p++)
+ if (p->hf_file) {
+ if (file_inode(p->hf_file))
+ break;
+ au_hfput(p, /*execed*/0);
+ }
+ } else {
+ bbot = au_br_index(sb, brid);
+ for (finfo->fi_btop = 0; finfo->fi_btop < bbot;
+ finfo->fi_btop++, p++)
+ if (p->hf_file)
+ au_hfput(p, /*execed*/0);
+ bbot = au_sbbot(sb);
+ }
+
+ p = fidir->fd_hfile + bbot;
+ for (fidir->fd_bbot = bbot; fidir->fd_bbot >= finfo->fi_btop;
+ fidir->fd_bbot--, p--)
+ if (p->hf_file) {
+ if (file_inode(p->hf_file))
+ break;
+ au_hfput(p, /*execed*/0);
+ }
+ AuDebugOn(fidir->fd_bbot < finfo->fi_btop);
+}
+
+/*
+ * after branch manipulation, refresh the file.
+ */
+static int refresh_file(struct file *file, int (*reopen)(struct file *file))
+{
+ int err, need_reopen, nbr;
+ aufs_bindex_t bbot, bindex;
+ struct dentry *dentry;
+ struct super_block *sb;
+ struct au_finfo *finfo;
+ struct au_hfile *hfile;
+
+ dentry = file->f_path.dentry;
+ sb = dentry->d_sb;
+ nbr = au_sbbot(sb) + 1;
+ finfo = au_fi(file);
+ if (!finfo->fi_hdir) {
+ hfile = &finfo->fi_htop;
+ AuDebugOn(!hfile->hf_file);
+ bindex = au_br_index(sb, hfile->hf_br->br_id);
+ AuDebugOn(bindex < 0);
+ if (bindex != finfo->fi_btop)
+ au_set_fbtop(file, bindex);
+ } else {
+ err = au_fidir_realloc(finfo, nbr, /*may_shrink*/0);
+ if (unlikely(err))
+ goto out;
+ au_do_refresh_dir(file);
+ }
+
+ err = 0;
+ need_reopen = 1;
+ if (!au_test_mmapped(file))
+ err = au_file_refresh_by_inode(file, &need_reopen);
+ if (finfo->fi_hdir)
+ /* harmless if err */
+ au_fidir_realloc(finfo, nbr, /*may_shrink*/1);
+ if (!err && need_reopen && !d_unlinked(dentry))
+ err = reopen(file);
+ if (!err) {
+ au_update_figen(file);
+ goto out; /* success */
+ }
+
+ /* error, close all lower files */
+ if (finfo->fi_hdir) {
+ bbot = au_fbbot_dir(file);
+ for (bindex = au_fbtop(file); bindex <= bbot; bindex++)
+ au_set_h_fptr(file, bindex, NULL);
+ }
+
+out:
+ return err;
+}
+
+/* a common function for regular files and dirs */
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+ int wlock, unsigned int fi_lsc)
+{
+ int err;
+ unsigned int sigen, figen;
+ aufs_bindex_t btop;
+ unsigned char pseudo_link;
+ struct dentry *dentry;
+ struct inode *inode;
+
+ err = 0;
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+ sigen = au_sigen(dentry->d_sb);
+ fi_write_lock_nested(file, fi_lsc);
+ figen = au_figen(file);
+ if (!fi_lsc)
+ di_write_lock_child(dentry);
+ else
+ di_write_lock_child2(dentry);
+ btop = au_dbtop(dentry);
+ pseudo_link = (btop != au_ibtop(inode));
+ if (sigen == figen && !pseudo_link && au_fbtop(file) == btop) {
+ if (!wlock) {
+ di_downgrade_lock(dentry, AuLock_IR);
+ fi_downgrade_lock(file);
+ }
+ goto out; /* success */
+ }
+
+ AuDbg("sigen %d, figen %d\n", sigen, figen);
+ if (au_digen_test(dentry, sigen)) {
+ err = au_reval_dpath(dentry, sigen);
+ AuDebugOn(!err && au_digen_test(dentry, sigen));
+ }
+
+ if (!err)
+ err = refresh_file(file, reopen);
+ if (!err) {
+ if (!wlock) {
+ di_downgrade_lock(dentry, AuLock_IR);
+ fi_downgrade_lock(file);
+ }
+ } else {
+ di_write_unlock(dentry);
+ fi_write_unlock(file);
+ }
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* cf. aufs_nopage() */
+/* for madvise(2) */
+static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
+{
+ unlock_page(page);
+ return 0;
+}
+
+/* it will never be called, but it is necessary to support O_DIRECT */
+static ssize_t aufs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{ BUG(); return 0; }
+
+/* they will never be called. */
+#ifdef CONFIG_AUFS_DEBUG
+static int aufs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_writepage(struct page *page, struct writeback_control *wbc)
+{ AuUnsupport(); return 0; }
+
+static int aufs_set_page_dirty(struct page *page)
+{ AuUnsupport(); return 0; }
+static void aufs_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length)
+{ AuUnsupport(); }
+static int aufs_releasepage(struct page *page, gfp_t gfp)
+{ AuUnsupport(); return 0; }
+#if 0 /* called by memory compaction regardless of the file */
+static int aufs_migratepage(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{ AuUnsupport(); return 0; }
+#endif
+static bool aufs_isolate_page(struct page *page, isolate_mode_t mode)
+{ AuUnsupport(); return true; }
+static void aufs_putback_page(struct page *page)
+{ AuUnsupport(); }
+static int aufs_launder_page(struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_is_partially_uptodate(struct page *page,
+ unsigned long from,
+ unsigned long count)
+{ AuUnsupport(); return 0; }
+static void aufs_is_dirty_writeback(struct page *page, bool *dirty,
+ bool *writeback)
+{ AuUnsupport(); }
+static int aufs_error_remove_page(struct address_space *mapping,
+ struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ sector_t *span)
+{ AuUnsupport(); return 0; }
+static void aufs_swap_deactivate(struct file *file)
+{ AuUnsupport(); }
+#endif /* CONFIG_AUFS_DEBUG */
+
+const struct address_space_operations aufs_aop = {
+ .readpage = aufs_readpage,
+ .direct_IO = aufs_direct_IO,
+#ifdef CONFIG_AUFS_DEBUG
+ .writepage = aufs_writepage,
+ /* no writepages, because of writepage */
+ .set_page_dirty = aufs_set_page_dirty,
+ /* no readpages, because of readpage */
+ .write_begin = aufs_write_begin,
+ .write_end = aufs_write_end,
+ /* no bmap, no block device */
+ .invalidatepage = aufs_invalidatepage,
+ .releasepage = aufs_releasepage,
+ /* is fallback_migrate_page ok? */
+ /* .migratepage = aufs_migratepage, */
+ .isolate_page = aufs_isolate_page,
+ .putback_page = aufs_putback_page,
+ .launder_page = aufs_launder_page,
+ .is_partially_uptodate = aufs_is_partially_uptodate,
+ .is_dirty_writeback = aufs_is_dirty_writeback,
+ .error_remove_page = aufs_error_remove_page,
+ .swap_activate = aufs_swap_activate,
+ .swap_deactivate = aufs_swap_deactivate
+#endif /* CONFIG_AUFS_DEBUG */
+};
diff --git a/fs/aufs/file.h b/fs/aufs/file.h
new file mode 100644
index 000000000000..c679a19df528
--- /dev/null
+++ b/fs/aufs/file.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * file operations
+ */
+
+#ifndef __AUFS_FILE_H__
+#define __AUFS_FILE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm_types.h>
+#include <linux/poll.h>
+#include "rwsem.h"
+
+struct au_branch;
+struct au_hfile {
+ struct file *hf_file;
+ struct au_branch *hf_br;
+};
+
+struct au_vdir;
+struct au_fidir {
+ aufs_bindex_t fd_bbot;
+ aufs_bindex_t fd_nent;
+ struct au_vdir *fd_vdir_cache;
+ struct au_hfile fd_hfile[];
+};
+
+static inline int au_fidir_sz(int nent)
+{
+ AuDebugOn(nent < 0);
+ return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent;
+}
+
+struct au_finfo {
+ atomic_t fi_generation;
+
+ struct au_rwsem fi_rwsem;
+ aufs_bindex_t fi_btop;
+
+ /* do not union them */
+ struct { /* for non-dir */
+ struct au_hfile fi_htop;
+ atomic_t fi_mmapped;
+ };
+ struct au_fidir *fi_hdir; /* for dir only */
+
+ struct hlist_bl_node fi_hlist;
+ struct file *fi_file; /* very ugly */
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* file.c */
+extern const struct address_space_operations aufs_aop;
+unsigned int au_file_roflags(unsigned int flags);
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+ struct file *file, int force_wr);
+struct au_do_open_args {
+ int aopen;
+ int (*open)(struct file *file, int flags,
+ struct file *h_file);
+ struct au_fidir *fidir;
+ struct file *h_file;
+};
+int au_do_open(struct file *file, struct au_do_open_args *args);
+int au_reopen_nondir(struct file *file);
+struct au_pin;
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin);
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+ int wlock, unsigned int fi_lsc);
+int au_do_flush(struct file *file, fl_owner_t id,
+ int (*flush)(struct file *file, fl_owner_t id));
+
+/* poll.c */
+#ifdef CONFIG_AUFS_POLL
+__poll_t aufs_poll(struct file *file, struct poll_table_struct *pt);
+#endif
+
+#ifdef CONFIG_AUFS_BR_HFSPLUS
+/* hfsplus.c */
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
+ int force_wr);
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+ struct file *h_file);
+#else
+AuStub(struct file *, au_h_open_pre, return NULL, struct dentry *dentry,
+ aufs_bindex_t bindex, int force_wr)
+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex,
+ struct file *h_file);
+#endif
+
+/* f_op.c */
+extern const struct file_operations aufs_file_fop;
+int au_do_open_nondir(struct file *file, int flags, struct file *h_file);
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file);
+struct file *au_read_pre(struct file *file, int keep_fi, unsigned int lsc);
+
+/* finfo.c */
+void au_hfput(struct au_hfile *hf, int execed);
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
+ struct file *h_file);
+
+void au_update_figen(struct file *file);
+struct au_fidir *au_fidir_alloc(struct super_block *sb);
+int au_fidir_realloc(struct au_finfo *finfo, int nbr, int may_shrink);
+
+void au_fi_init_once(void *_fi);
+void au_finfo_fin(struct file *file);
+int au_finfo_init(struct file *file, struct au_fidir *fidir);
+
+/* ioctl.c */
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+ unsigned long arg);
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_finfo *au_fi(struct file *file)
+{
+ return file->private_data;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define fi_read_lock(f) au_rw_read_lock(&au_fi(f)->fi_rwsem)
+#define fi_write_lock(f) au_rw_write_lock(&au_fi(f)->fi_rwsem)
+#define fi_read_trylock(f) au_rw_read_trylock(&au_fi(f)->fi_rwsem)
+#define fi_write_trylock(f) au_rw_write_trylock(&au_fi(f)->fi_rwsem)
+/*
+#define fi_read_trylock_nested(f) \
+ au_rw_read_trylock_nested(&au_fi(f)->fi_rwsem)
+#define fi_write_trylock_nested(f) \
+ au_rw_write_trylock_nested(&au_fi(f)->fi_rwsem)
+*/
+
+#define fi_read_unlock(f) au_rw_read_unlock(&au_fi(f)->fi_rwsem)
+#define fi_write_unlock(f) au_rw_write_unlock(&au_fi(f)->fi_rwsem)
+#define fi_downgrade_lock(f) au_rw_dgrade_lock(&au_fi(f)->fi_rwsem)
+
+/* lock subclass for finfo */
+enum {
+ AuLsc_FI_1,
+ AuLsc_FI_2
+};
+
+static inline void fi_read_lock_nested(struct file *f, unsigned int lsc)
+{
+ au_rw_read_lock_nested(&au_fi(f)->fi_rwsem, lsc);
+}
+
+static inline void fi_write_lock_nested(struct file *f, unsigned int lsc)
+{
+ au_rw_write_lock_nested(&au_fi(f)->fi_rwsem, lsc);
+}
+
+/*
+ * fi_read_lock_1, fi_write_lock_1,
+ * fi_read_lock_2, fi_write_lock_2
+ */
+#define AuReadLockFunc(name) \
+static inline void fi_read_lock_##name(struct file *f) \
+{ fi_read_lock_nested(f, AuLsc_FI_##name); }
+
+#define AuWriteLockFunc(name) \
+static inline void fi_write_lock_##name(struct file *f) \
+{ fi_write_lock_nested(f, AuLsc_FI_##name); }
+
+#define AuRWLockFuncs(name) \
+ AuReadLockFunc(name) \
+ AuWriteLockFunc(name)
+
+AuRWLockFuncs(1);
+AuRWLockFuncs(2);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define FiMustNoWaiters(f) AuRwMustNoWaiters(&au_fi(f)->fi_rwsem)
+#define FiMustAnyLock(f) AuRwMustAnyLock(&au_fi(f)->fi_rwsem)
+#define FiMustWriteLock(f) AuRwMustWriteLock(&au_fi(f)->fi_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: hard/soft set? */
+static inline aufs_bindex_t au_fbtop(struct file *file)
+{
+ FiMustAnyLock(file);
+ return au_fi(file)->fi_btop;
+}
+
+static inline aufs_bindex_t au_fbbot_dir(struct file *file)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_hdir->fd_bbot;
+}
+
+static inline struct au_vdir *au_fvdir_cache(struct file *file)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_hdir->fd_vdir_cache;
+}
+
+static inline void au_set_fbtop(struct file *file, aufs_bindex_t bindex)
+{
+ FiMustWriteLock(file);
+ au_fi(file)->fi_btop = bindex;
+}
+
+static inline void au_set_fbbot_dir(struct file *file, aufs_bindex_t bindex)
+{
+ FiMustWriteLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ au_fi(file)->fi_hdir->fd_bbot = bindex;
+}
+
+static inline void au_set_fvdir_cache(struct file *file,
+ struct au_vdir *vdir_cache)
+{
+ FiMustWriteLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache;
+}
+
+static inline struct file *au_hf_top(struct file *file)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_htop.hf_file;
+}
+
+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file;
+}
+
+/* todo: memory barrier? */
+static inline unsigned int au_figen(struct file *f)
+{
+ return atomic_read(&au_fi(f)->fi_generation);
+}
+
+static inline void au_set_mmapped(struct file *f)
+{
+ if (atomic_inc_return(&au_fi(f)->fi_mmapped))
+ return;
+ pr_warn("fi_mmapped wrapped around\n");
+ while (!atomic_inc_return(&au_fi(f)->fi_mmapped))
+ ;
+}
+
+static inline void au_unset_mmapped(struct file *f)
+{
+ atomic_dec(&au_fi(f)->fi_mmapped);
+}
+
+static inline int au_test_mmapped(struct file *f)
+{
+ return atomic_read(&au_fi(f)->fi_mmapped);
+}
+
+/* customize vma->vm_file */
+
+static inline void au_do_vm_file_reset(struct vm_area_struct *vma,
+ struct file *file)
+{
+ struct file *f;
+
+ f = vma->vm_file;
+ get_file(file);
+ vma->vm_file = file;
+ fput(f);
+}
+
+#ifdef CONFIG_MMU
+#define AuDbgVmRegion(file, vma) do {} while (0)
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+ struct file *file)
+{
+ au_do_vm_file_reset(vma, file);
+}
+#else
+#define AuDbgVmRegion(file, vma) \
+ AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file))
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+ struct file *file)
+{
+ struct file *f;
+
+ au_do_vm_file_reset(vma, file);
+ f = vma->vm_region->vm_file;
+ get_file(file);
+ vma->vm_region->vm_file = file;
+ fput(f);
+}
+#endif /* CONFIG_MMU */
+
+/* handle vma->vm_prfile */
+static inline void au_vm_prfile_set(struct vm_area_struct *vma,
+ struct file *file)
+{
+ get_file(file);
+ vma->vm_prfile = file;
+#ifndef CONFIG_MMU
+ get_file(file);
+ vma->vm_region->vm_prfile = file;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FILE_H__ */
diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c
new file mode 100644
index 000000000000..448c5e9cd85d
--- /dev/null
+++ b/fs/aufs/finfo.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * file private data
+ */
+
+#include "aufs.h"
+
+void au_hfput(struct au_hfile *hf, int execed)
+{
+ if (execed)
+ allow_write_access(hf->hf_file);
+ fput(hf->hf_file);
+ hf->hf_file = NULL;
+ au_lcnt_dec(&hf->hf_br->br_nfiles);
+ hf->hf_br = NULL;
+}
+
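+/*
+ * replace the lower file at @bindex: put the old one (if any), and when @val
+ * is non-NULL install it together with its branch.
+ */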
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val)
+{
+ struct au_finfo *finfo = au_fi(file);
+ struct au_hfile *hf;
+ struct au_fidir *fidir;
+
+ fidir = finfo->fi_hdir;
+ if (!fidir) {
+ AuDebugOn(finfo->fi_btop != bindex);
+ hf = &finfo->fi_htop;
+ } else
+ hf = fidir->fd_hfile + bindex;
+
+ if (hf && hf->hf_file)
+ au_hfput(hf, vfsub_file_execed(file));
+ if (val) {
+ FiMustWriteLock(file);
+ AuDebugOn(IS_ERR_OR_NULL(file->f_path.dentry));
+ hf->hf_file = val;
+ hf->hf_br = au_sbr(file->f_path.dentry->d_sb, bindex);
+ }
+}
+
+void au_update_figen(struct file *file)
+{
+ atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_path.dentry));
+ /* smp_mb(); */ /* atomic_set */
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_fidir *au_fidir_alloc(struct super_block *sb)
+{
+ struct au_fidir *fidir;
+ int nbr;
+
+ nbr = au_sbbot(sb) + 1;
+ if (nbr < 2)
+ nbr = 2; /* initially allocate for 2 branches */
+ fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS);
+ if (fidir) {
+ fidir->fd_bbot = -1;
+ fidir->fd_nent = nbr;
+ }
+
+ return fidir;
+}
+
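+/* resize the per-branch lower file array so that it can hold @nbr entries */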
+int au_fidir_realloc(struct au_finfo *finfo, int nbr, int may_shrink)
+{
+ int err;
+ struct au_fidir *fidir, *p;
+
+ AuRwMustWriteLock(&finfo->fi_rwsem);
+ fidir = finfo->fi_hdir;
+ AuDebugOn(!fidir);
+
+ err = -ENOMEM;
+ p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr),
+ GFP_NOFS, may_shrink);
+ if (p) {
+ p->fd_nent = nbr;
+ finfo->fi_hdir = p;
+ err = 0;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_finfo_fin(struct file *file)
+{
+ struct au_finfo *finfo;
+
+ au_lcnt_dec(&au_sbi(file->f_path.dentry->d_sb)->si_nfiles);
+
+ finfo = au_fi(file);
+ AuDebugOn(finfo->fi_hdir);
+ AuRwDestroy(&finfo->fi_rwsem);
+ au_cache_free_finfo(finfo);
+}
+
+void au_fi_init_once(void *_finfo)
+{
+ struct au_finfo *finfo = _finfo;
+
+ au_rw_init(&finfo->fi_rwsem);
+}
+
+int au_finfo_init(struct file *file, struct au_fidir *fidir)
+{
+ int err;
+ struct au_finfo *finfo;
+ struct dentry *dentry;
+
+ err = -ENOMEM;
+ dentry = file->f_path.dentry;
+ finfo = au_cache_alloc_finfo();
+ if (unlikely(!finfo))
+ goto out;
+
+ err = 0;
+ au_lcnt_inc(&au_sbi(dentry->d_sb)->si_nfiles);
+ au_rw_write_lock(&finfo->fi_rwsem);
+ finfo->fi_btop = -1;
+ finfo->fi_hdir = fidir;
+ atomic_set(&finfo->fi_generation, au_digen(dentry));
+ /* smp_mb(); */ /* atomic_set */
+
+ file->private_data = finfo;
+
+out:
+ return err;
+}
diff --git a/fs/aufs/fstype.h b/fs/aufs/fstype.h
new file mode 100644
index 000000000000..510bd87e9404
--- /dev/null
+++ b/fs/aufs/fstype.h
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * judging filesystem type
+ */
+
+#ifndef __AUFS_FSTYPE_H__
+#define __AUFS_FSTYPE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/nfs_fs.h>
+#include <linux/romfs_fs.h>
+
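+/*
+ * each helper below degenerates to a constant 0 when the corresponding
+ * filesystem is not configured, so the callers' tests can be optimized away.
+ */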
+static inline int au_test_aufs(struct super_block *sb)
+{
+ return sb->s_magic == AUFS_SUPER_MAGIC;
+}
+
+static inline const char *au_sbtype(struct super_block *sb)
+{
+ return sb->s_type->name;
+}
+
+static inline int au_test_iso9660(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_ISO9660_FS)
+ return sb->s_magic == ISOFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_romfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_ROMFS_FS)
+ return sb->s_magic == ROMFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_cramfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_CRAMFS)
+ return sb->s_magic == CRAMFS_MAGIC;
+#endif
+ return 0;
+}
+
+static inline int au_test_nfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_NFS_FS)
+ return sb->s_magic == NFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_fuse(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_FUSE_FS)
+ return sb->s_magic == FUSE_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_xfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_XFS_FS)
+ return sb->s_magic == XFS_SB_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_TMPFS
+ return sb->s_magic == TMPFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_ECRYPT_FS)
+ return !strcmp(au_sbtype(sb), "ecryptfs");
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_ramfs(struct super_block *sb)
+{
+ return sb->s_magic == RAMFS_MAGIC;
+}
+
+static inline int au_test_ubifs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_UBIFS_FS)
+ return sb->s_magic == UBIFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_procfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_PROC_FS
+ return sb->s_magic == PROC_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_sysfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SYSFS
+ return sb->s_magic == SYSFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_configfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+ return sb->s_magic == CONFIGFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_minix(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_MINIX_FS)
+ return sb->s_magic == MINIX3_SUPER_MAGIC
+ || sb->s_magic == MINIX2_SUPER_MAGIC
+ || sb->s_magic == MINIX2_SUPER_MAGIC2
+ || sb->s_magic == MINIX_SUPER_MAGIC
+ || sb->s_magic == MINIX_SUPER_MAGIC2;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_fat(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_FAT_FS)
+ return sb->s_magic == MSDOS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_msdos(struct super_block *sb)
+{
+ return au_test_fat(sb);
+}
+
+static inline int au_test_vfat(struct super_block *sb)
+{
+ return au_test_fat(sb);
+}
+
+static inline int au_test_securityfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SECURITYFS
+ return sb->s_magic == SECURITYFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_squashfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_SQUASHFS)
+ return sb->s_magic == SQUASHFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_btrfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_BTRFS_FS)
+ return sb->s_magic == BTRFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_xenfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_XENFS)
+ return sb->s_magic == XENFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_debugfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_DEBUG_FS
+ return sb->s_magic == DEBUGFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_nilfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_NILFS)
+ return sb->s_magic == NILFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_HFSPLUS_FS)
+ return sb->s_magic == HFSPLUS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * filesystems which cannot be used as an aufs branch.
+ */
+static inline int au_test_fs_unsuppoted(struct super_block *sb)
+{
+ return
+#ifndef CONFIG_AUFS_BR_RAMFS
+ au_test_ramfs(sb) ||
+#endif
+ au_test_procfs(sb)
+ || au_test_sysfs(sb)
+ || au_test_configfs(sb)
+ || au_test_debugfs(sb)
+ || au_test_securityfs(sb)
+ || au_test_xenfs(sb)
+ || au_test_ecryptfs(sb)
+ /* || !strcmp(au_sbtype(sb), "unionfs") */
+ || au_test_aufs(sb); /* will be supported in next version */
+}
+
+static inline int au_test_fs_remote(struct super_block *sb)
+{
+ return !au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+ && !au_test_ramfs(sb)
+#endif
+ && !(sb->s_type->fs_flags & FS_REQUIRES_DEV);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Note: the functions below were written after reading ->getattr() in every
+ * filesystem under linux/fs, which means the survey has to be repeated on
+ * every kernel update...
+ */
+
+/*
+ * some filesystems require getattr to refresh the inode attributes before
+ * they are referenced.
+ * in most cases we can rely on the cached inode attributes of NFS (or any
+ * remote fs) and leave the work to d_revalidate().
+ */
+static inline int au_test_fs_refresh_iattr(struct super_block *sb)
+{
+ return au_test_nfs(sb)
+ || au_test_fuse(sb)
+ /* || au_test_btrfs(sb) */ /* untested */
+ ;
+}
+
+/*
+ * filesystems which don't maintain i_size or i_blocks.
+ */
+static inline int au_test_fs_bad_iattr_size(struct super_block *sb)
+{
+ return au_test_xfs(sb)
+ || au_test_btrfs(sb)
+ || au_test_ubifs(sb)
+ || au_test_hfsplus(sb) /* maintained, but incorrect */
+ /* || au_test_minix(sb) */ /* untested */
+ ;
+}
+
+/*
+ * filesystems which don't store the correct value in some of their inode
+ * attributes.
+ */
+static inline int au_test_fs_bad_iattr(struct super_block *sb)
+{
+ return au_test_fs_bad_iattr_size(sb)
+ || au_test_fat(sb)
+ || au_test_msdos(sb)
+ || au_test_vfat(sb);
+}
+
+/* filesystems which don't check i_nlink in link(2) */
+static inline int au_test_fs_no_limit_nlink(struct super_block *sb)
+{
+ return au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+ || au_test_ramfs(sb)
+#endif
+ || au_test_ubifs(sb)
+ || au_test_hfsplus(sb);
+}
+
+/*
+ * filesystems which set S_NOATIME and S_NOCMTIME.
+ */
+static inline int au_test_fs_notime(struct super_block *sb)
+{
+ return au_test_nfs(sb)
+ || au_test_fuse(sb)
+ || au_test_ubifs(sb)
+ ;
+}
+
+/* temporary support for inode number 1 in cramfs */
+static inline int au_test_fs_unique_ino(struct inode *inode)
+{
+ if (au_test_cramfs(inode->i_sb))
+ return inode->i_ino != 1;
+ return 1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * the filesystem where the xino files are placed must support I/O after
+ * unlink and maintain i_size and i_blocks.
+ */
+static inline int au_test_fs_bad_xino(struct super_block *sb)
+{
+ return au_test_fs_remote(sb)
+ || au_test_fs_bad_iattr_size(sb)
+ /* don't want unnecessary work for xino */
+ || au_test_aufs(sb)
+ || au_test_ecryptfs(sb)
+ || au_test_nilfs(sb);
+}
+
+static inline int au_test_fs_trunc_xino(struct super_block *sb)
+{
+ return au_test_tmpfs(sb)
+ || au_test_ramfs(sb);
+}
+
+/*
+ * test whether @sb is real-readonly.
+ */
+static inline int au_test_fs_rr(struct super_block *sb)
+{
+ return au_test_squashfs(sb)
+ || au_test_iso9660(sb)
+ || au_test_cramfs(sb)
+ || au_test_romfs(sb);
+}
+
+/*
+ * test whether @inode is on NFS mounted with the 'noacl' option.
+ * NFS always sets SB_POSIXACL regardless of its 'noacl' mount option.
+ */
+static inline int au_test_nfs_noacl(struct inode *inode)
+{
+ return au_test_nfs(inode->i_sb)
+ /* && IS_POSIXACL(inode) */
+ && !nfs_server_capable(inode, NFS_CAP_ACLS);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FSTYPE_H__ */
diff --git a/fs/aufs/hbl.h b/fs/aufs/hbl.h
new file mode 100644
index 000000000000..6db65b16b08a
--- /dev/null
+++ b/fs/aufs/hbl.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * helpers for hlist_bl.h
+ */
+
+#ifndef __AUFS_HBL_H__
+#define __AUFS_HBL_H__
+
+#ifdef __KERNEL__
+
+#include <linux/list_bl.h>
+
+static inline void au_hbl_add(struct hlist_bl_node *node,
+ struct hlist_bl_head *hbl)
+{
+ hlist_bl_lock(hbl);
+ hlist_bl_add_head(node, hbl);
+ hlist_bl_unlock(hbl);
+}
+
+static inline void au_hbl_del(struct hlist_bl_node *node,
+ struct hlist_bl_head *hbl)
+{
+ hlist_bl_lock(hbl);
+ hlist_bl_del(node);
+ hlist_bl_unlock(hbl);
+}
+
+#define au_hbl_for_each(pos, head) \
+ for (pos = hlist_bl_first(head); \
+ pos; \
+ pos = pos->next)
+
+static inline unsigned long au_hbl_count(struct hlist_bl_head *hbl)
+{
+ unsigned long cnt;
+ struct hlist_bl_node *pos;
+
+ cnt = 0;
+ hlist_bl_lock(hbl);
+ au_hbl_for_each(pos, hbl)
+ cnt++;
+ hlist_bl_unlock(hbl);
+ return cnt;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_HBL_H__ */
diff --git a/fs/aufs/hfsnotify.c b/fs/aufs/hfsnotify.c
new file mode 100644
index 000000000000..74c5483f8b69
--- /dev/null
+++ b/fs/aufs/hfsnotify.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * fsnotify for the lower directories
+ */
+
+#include "aufs.h"
+
+/* FS_IN_IGNORED is unnecessary */
+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE
+ | FS_CREATE | FS_EVENT_ON_CHILD);
+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq);
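+/*
+ * au_hfsn_ifree counts the marks whose free is still pending; au_hfsn_fin()
+ * waits on au_hfsn_wq until it drops back to zero.
+ */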
+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0);
+
+static void au_hfsn_free_mark(struct fsnotify_mark *mark)
+{
+ struct au_hnotify *hn = container_of(mark, struct au_hnotify,
+ hn_mark);
+ /* AuDbg("here\n"); */
+ au_cache_free_hnotify(hn);
+ smp_mb__before_atomic(); /* for atomic64_dec */
+ if (atomic64_dec_and_test(&au_hfsn_ifree))
+ wake_up(&au_hfsn_wq);
+}
+
+static int au_hfsn_alloc(struct au_hinode *hinode)
+{
+ int err;
+ struct au_hnotify *hn;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct fsnotify_mark *mark;
+ aufs_bindex_t bindex;
+
+ hn = hinode->hi_notify;
+ sb = hn->hn_aufs_inode->i_sb;
+ bindex = au_br_index(sb, hinode->hi_id);
+ br = au_sbr(sb, bindex);
+ AuDebugOn(!br->br_hfsn);
+
+ mark = &hn->hn_mark;
+ fsnotify_init_mark(mark, br->br_hfsn->hfsn_group);
+ mark->mask = AuHfsnMask;
+ /*
+ * on a rename or rmdir detected by udba, aufs assigns a new inode to the
+ * already-known h_inode, so pass 1 to allow duplicate marks.
+ */
+ lockdep_off();
+ err = fsnotify_add_inode_mark(mark, hinode->hi_inode, /*allow_dups*/1);
+ lockdep_on();
+
+ return err;
+}
+
+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn)
+{
+ struct fsnotify_mark *mark;
+ unsigned long long ull;
+ struct fsnotify_group *group;
+
+ ull = atomic64_inc_return(&au_hfsn_ifree);
+ BUG_ON(!ull);
+
+ mark = &hn->hn_mark;
+ spin_lock(&mark->lock);
+ group = mark->group;
+ fsnotify_get_group(group);
+ spin_unlock(&mark->lock);
+ lockdep_off();
+ fsnotify_destroy_mark(mark, group);
+ fsnotify_put_mark(mark);
+ fsnotify_put_group(group);
+ lockdep_on();
+
+ /* free hn by myself */
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set)
+{
+ struct fsnotify_mark *mark;
+
+ mark = &hinode->hi_notify->hn_mark;
+ spin_lock(&mark->lock);
+ if (do_set) {
+ AuDebugOn(mark->mask & AuHfsnMask);
+ mark->mask |= AuHfsnMask;
+ } else {
+ AuDebugOn(!(mark->mask & AuHfsnMask));
+ mark->mask &= ~AuHfsnMask;
+ }
+ spin_unlock(&mark->lock);
+ /* fsnotify_recalc_inode_mask(hinode->hi_inode); */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* #define AuDbgHnotify */
+#ifdef AuDbgHnotify
+static char *au_hfsn_name(u32 mask)
+{
+#ifdef CONFIG_AUFS_DEBUG
+#define test_ret(flag) \
+ do { \
+ if (mask & flag) \
+ return #flag; \
+ } while (0)
+ test_ret(FS_ACCESS);
+ test_ret(FS_MODIFY);
+ test_ret(FS_ATTRIB);
+ test_ret(FS_CLOSE_WRITE);
+ test_ret(FS_CLOSE_NOWRITE);
+ test_ret(FS_OPEN);
+ test_ret(FS_MOVED_FROM);
+ test_ret(FS_MOVED_TO);
+ test_ret(FS_CREATE);
+ test_ret(FS_DELETE);
+ test_ret(FS_DELETE_SELF);
+ test_ret(FS_MOVE_SELF);
+ test_ret(FS_UNMOUNT);
+ test_ret(FS_Q_OVERFLOW);
+ test_ret(FS_IN_IGNORED);
+ test_ret(FS_ISDIR);
+ test_ret(FS_IN_ONESHOT);
+ test_ret(FS_EVENT_ON_CHILD);
+ return "";
+#undef test_ret
+#else
+ return "??";
+#endif
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_free_group(struct fsnotify_group *group)
+{
+ struct au_br_hfsnotify *hfsn = group->private;
+
+ /* AuDbg("here\n"); */
+ au_kfree_try_rcu(hfsn);
+}
+
+static int au_hfsn_handle_event(struct fsnotify_group *group,
+ struct inode *inode,
+ u32 mask, const void *data, int data_type,
+ const unsigned char *file_name, u32 cookie,
+ struct fsnotify_iter_info *iter_info)
+{
+ int err;
+ struct au_hnotify *hnotify;
+ struct inode *h_dir, *h_inode;
+ struct qstr h_child_qstr = QSTR_INIT(file_name, strlen(file_name));
+ struct fsnotify_mark *inode_mark;
+
+ AuDebugOn(data_type != FSNOTIFY_EVENT_INODE);
+
+ err = 0;
+ /* if FS_UNMOUNT happens, there must be another bug */
+ AuDebugOn(mask & FS_UNMOUNT);
+ if (mask & (FS_IN_IGNORED | FS_UNMOUNT))
+ goto out;
+
+ h_dir = inode;
+ h_inode = NULL;
+#ifdef AuDbgHnotify
+ au_debug_on();
+ if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1
+ || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) {
+ AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n",
+ h_dir->i_ino, mask, au_hfsn_name(mask),
+ AuLNPair(&h_child_qstr), h_inode ? h_inode->i_ino : 0);
+ /* WARN_ON(1); */
+ }
+ au_debug_off();
+#endif
+
+ inode_mark = fsnotify_iter_inode_mark(iter_info);
+ AuDebugOn(!inode_mark);
+ hnotify = container_of(inode_mark, struct au_hnotify, hn_mark);
+ err = au_hnotify(h_dir, hnotify, mask, &h_child_qstr, h_inode);
+
+out:
+ return err;
+}
+
+static struct fsnotify_ops au_hfsn_ops = {
+ .handle_event = au_hfsn_handle_event,
+ .free_group_priv = au_hfsn_free_group,
+ .free_mark = au_hfsn_free_mark
+};
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_fin_br(struct au_branch *br)
+{
+ struct au_br_hfsnotify *hfsn;
+
+ hfsn = br->br_hfsn;
+ if (hfsn) {
+ lockdep_off();
+ fsnotify_put_group(hfsn->hfsn_group);
+ lockdep_on();
+ }
+}
+
+static int au_hfsn_init_br(struct au_branch *br, int perm)
+{
+ int err;
+ struct fsnotify_group *group;
+ struct au_br_hfsnotify *hfsn;
+
+ err = 0;
+ br->br_hfsn = NULL;
+ if (!au_br_hnotifyable(perm))
+ goto out;
+
+ err = -ENOMEM;
+ hfsn = kmalloc(sizeof(*hfsn), GFP_NOFS);
+ if (unlikely(!hfsn))
+ goto out;
+
+ err = 0;
+ group = fsnotify_alloc_group(&au_hfsn_ops);
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ pr_err("fsnotify_alloc_group() failed, %d\n", err);
+ goto out_hfsn;
+ }
+
+ group->private = hfsn;
+ hfsn->hfsn_group = group;
+ br->br_hfsn = hfsn;
+ goto out; /* success */
+
+out_hfsn:
+ au_kfree_try_rcu(hfsn);
+out:
+ return err;
+}
+
+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+ int err;
+
+ err = 0;
+ if (!br->br_hfsn)
+ err = au_hfsn_init_br(br, perm);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_fin(void)
+{
+ AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree));
+ wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree));
+}
+
+const struct au_hnotify_op au_hnotify_op = {
+ .ctl = au_hfsn_ctl,
+ .alloc = au_hfsn_alloc,
+ .free = au_hfsn_free,
+
+ .fin = au_hfsn_fin,
+
+ .reset_br = au_hfsn_reset_br,
+ .fin_br = au_hfsn_fin_br,
+ .init_br = au_hfsn_init_br
+};
diff --git a/fs/aufs/hfsplus.c b/fs/aufs/hfsplus.c
new file mode 100644
index 000000000000..3bd869a69c8f
--- /dev/null
+++ b/fs/aufs/hfsplus.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * special support for filesystems which acquire an inode mutex on the final
+ * close of a file, e.g. hfsplus.
+ *
+ * The trick is simple and rather crude: open the file once before the really
+ * necessary open, so that hfsplus does not see the later close as the final
+ * one.
+ * The caller should call au_h_open_pre() after acquiring the inode mutex,
+ * and au_h_open_post() after releasing it.
+ */
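+/*
+ * a rough sketch of the calling order described above (the real call sites
+ * live elsewhere in aufs; 0 here just means force_wr is not requested):
+ *
+ *	inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ *	h_file = au_h_open_pre(dentry, bindex, 0);
+ *	... operate on the lower inode ...
+ *	inode_unlock(h_inode);
+ *	au_h_open_post(dentry, bindex, h_file);
+ */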
+
+#include "aufs.h"
+
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
+ int force_wr)
+{
+ struct file *h_file;
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ AuDebugOn(!h_dentry);
+ AuDebugOn(d_is_negative(h_dentry));
+
+ h_file = NULL;
+ if (au_test_hfsplus(h_dentry->d_sb)
+ && d_is_reg(h_dentry))
+ h_file = au_h_open(dentry, bindex,
+ O_RDONLY | O_NOATIME | O_LARGEFILE,
+ /*file*/NULL, force_wr);
+ return h_file;
+}
+
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+ struct file *h_file)
+{
+ struct au_branch *br;
+
+ if (h_file) {
+ fput(h_file);
+ br = au_sbr(dentry->d_sb, bindex);
+ au_lcnt_dec(&br->br_nfiles);
+ }
+}
diff --git a/fs/aufs/hnotify.c b/fs/aufs/hnotify.c
new file mode 100644
index 000000000000..234fdf90c6c9
--- /dev/null
+++ b/fs/aufs/hnotify.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * abstraction to notify the direct changes on lower directories
+ */
+
+#include "aufs.h"
+
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode)
+{
+ int err;
+ struct au_hnotify *hn;
+
+ err = -ENOMEM;
+ hn = au_cache_alloc_hnotify();
+ if (hn) {
+ hn->hn_aufs_inode = inode;
+ hinode->hi_notify = hn;
+ err = au_hnotify_op.alloc(hinode);
+ AuTraceErr(err);
+ if (unlikely(err)) {
+ hinode->hi_notify = NULL;
+ au_cache_free_hnotify(hn);
+ /*
+ * The upper dir was removed by udba, but a dir with the same
+ * name is left. In this case, aufs assigns a new inode
+ * number and sets the monitor again.
+ * For the lower dir, the old monitor is still left.
+ */
+ if (err == -EEXIST)
+ err = 0;
+ }
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+void au_hn_free(struct au_hinode *hinode)
+{
+ struct au_hnotify *hn;
+
+ hn = hinode->hi_notify;
+ if (hn) {
+ hinode->hi_notify = NULL;
+ if (au_hnotify_op.free(hinode, hn))
+ au_cache_free_hnotify(hn);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_hn_ctl(struct au_hinode *hinode, int do_set)
+{
+ if (hinode->hi_notify)
+ au_hnotify_op.ctl(hinode, do_set);
+}
+
+void au_hn_reset(struct inode *inode, unsigned int flags)
+{
+ aufs_bindex_t bindex, bbot;
+ struct inode *hi;
+ struct dentry *iwhdentry;
+
+ bbot = au_ibbot(inode);
+ for (bindex = au_ibtop(inode); bindex <= bbot; bindex++) {
+ hi = au_h_iptr(inode, bindex);
+ if (!hi)
+ continue;
+
+ /* inode_lock_nested(hi, AuLsc_I_CHILD); */
+ iwhdentry = au_hi_wh(inode, bindex);
+ if (iwhdentry)
+ dget(iwhdentry);
+ au_igrab(hi);
+ au_set_h_iptr(inode, bindex, NULL, 0);
+ au_set_h_iptr(inode, bindex, au_igrab(hi),
+ flags & ~AuHi_XINO);
+ iput(hi);
+ dput(iwhdentry);
+ /* inode_unlock(hi); */
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int hn_xino(struct inode *inode, struct inode *h_inode)
+{
+ int err;
+ aufs_bindex_t bindex, bbot, bfound, btop;
+ struct inode *h_i;
+
+ err = 0;
+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+ pr_warn("branch root dir was changed\n");
+ goto out;
+ }
+
+ bfound = -1;
+ bbot = au_ibbot(inode);
+ btop = au_ibtop(inode);
+#if 0 /* reserved for future use */
+ if (bindex == bbot) {
+ /* keep this ino in rename case */
+ goto out;
+ }
+#endif
+ for (bindex = btop; bindex <= bbot; bindex++)
+ if (au_h_iptr(inode, bindex) == h_inode) {
+ bfound = bindex;
+ break;
+ }
+ if (bfound < 0)
+ goto out;
+
+ for (bindex = btop; bindex <= bbot; bindex++) {
+ h_i = au_h_iptr(inode, bindex);
+ if (!h_i)
+ continue;
+
+ err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0);
+ /* ignore this error */
+ /* bad action? */
+ }
+
+ /* children inode number will be broken */
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int hn_gen_tree(struct dentry *dentry)
+{
+ int err, i, j, ndentry;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, dentry, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++) {
+ struct dentry *d;
+
+ d = dentries[j];
+ if (IS_ROOT(d))
+ continue;
+
+ au_digen_dec(d);
+ if (d_really_is_positive(d))
+ /* todo: reset children xino?
+ cached children only? */
+ au_iigen_dec(d_inode(d));
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+
+#if 0
+ /* discard children */
+ dentry_unhash(dentry);
+ dput(dentry);
+#endif
+out:
+ return err;
+}
+
+/*
+ * return 0 if processed.
+ */
+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode,
+ const unsigned int isdir)
+{
+ int err;
+ struct dentry *d;
+ struct qstr *dname;
+
+ err = 1;
+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+ pr_warn("branch root dir was changed\n");
+ err = 0;
+ goto out;
+ }
+
+ if (!isdir) {
+ AuDebugOn(!name);
+ au_iigen_dec(inode);
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&d->d_lock);
+ dname = &d->d_name;
+ if (dname->len != nlen
+ && memcmp(dname->name, name, nlen)) {
+ spin_unlock(&d->d_lock);
+ continue;
+ }
+ err = 0;
+ au_digen_dec(d);
+ spin_unlock(&d->d_lock);
+ break;
+ }
+ spin_unlock(&inode->i_lock);
+ } else {
+ au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR);
+ d = d_find_any_alias(inode);
+ if (!d) {
+ au_iigen_dec(inode);
+ goto out;
+ }
+
+ spin_lock(&d->d_lock);
+ dname = &d->d_name;
+ if (dname->len == nlen && !memcmp(dname->name, name, nlen)) {
+ spin_unlock(&d->d_lock);
+ err = hn_gen_tree(d);
+ spin_lock(&d->d_lock);
+ }
+ spin_unlock(&d->d_lock);
+ dput(d);
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir)
+{
+ int err;
+
+ if (IS_ROOT(dentry)) {
+ pr_warn("branch root dir was changed\n");
+ return 0;
+ }
+
+ err = 0;
+ if (!isdir) {
+ au_digen_dec(dentry);
+ if (d_really_is_positive(dentry))
+ au_iigen_dec(d_inode(dentry));
+ } else {
+ au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR);
+ if (d_really_is_positive(dentry))
+ err = hn_gen_tree(dentry);
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* hnotify job flags */
+#define AuHnJob_XINO0 1
+#define AuHnJob_GEN (1 << 1)
+#define AuHnJob_DIRENT (1 << 2)
+#define AuHnJob_ISDIR (1 << 3)
+#define AuHnJob_TRYXINO0 (1 << 4)
+#define AuHnJob_MNTPNT (1 << 5)
+#define au_ftest_hnjob(flags, name) ((flags) & AuHnJob_##name)
+#define au_fset_hnjob(flags, name) \
+ do { (flags) |= AuHnJob_##name; } while (0)
+#define au_fclr_hnjob(flags, name) \
+ do { (flags) &= ~AuHnJob_##name; } while (0)
+
+enum {
+ AuHn_CHILD,
+ AuHn_PARENT,
+ AuHnLast
+};
+
+struct au_hnotify_args {
+ struct inode *h_dir, *dir, *h_child_inode;
+ u32 mask;
+ unsigned int flags[AuHnLast];
+ unsigned int h_child_nlen;
+ char h_child_name[];
+};
+
+struct hn_job_args {
+ unsigned int flags;
+ struct inode *inode, *h_inode, *dir, *h_dir;
+ struct dentry *dentry;
+ char *h_name;
+ int h_nlen;
+};
+
+static int hn_job(struct hn_job_args *a)
+{
+ const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR);
+ int e;
+
+ /* reset xino */
+ if (au_ftest_hnjob(a->flags, XINO0) && a->inode)
+ hn_xino(a->inode, a->h_inode); /* ignore this error */
+
+ if (au_ftest_hnjob(a->flags, TRYXINO0)
+ && a->inode
+ && a->h_inode) {
+ inode_lock_shared_nested(a->h_inode, AuLsc_I_CHILD);
+ if (!a->h_inode->i_nlink
+ && !(a->h_inode->i_state & I_LINKABLE))
+ hn_xino(a->inode, a->h_inode); /* ignore this error */
+ inode_unlock_shared(a->h_inode);
+ }
+
+ /* make the generation obsolete */
+ if (au_ftest_hnjob(a->flags, GEN)) {
+ e = -1;
+ if (a->inode)
+ e = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode,
+ isdir);
+ if (e && a->dentry)
+ hn_gen_by_name(a->dentry, isdir);
+ /* ignore this error */
+ }
+
+ /* make dir entries obsolete */
+ if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) {
+ struct au_vdir *vdir;
+
+ vdir = au_ivdir(a->inode);
+ if (vdir)
+ vdir->vd_jiffy = 0;
+ /* IMustLock(a->inode); */
+ /* inode_inc_iversion(a->inode); */
+ }
+
+ /* can do nothing but warn */
+ if (au_ftest_hnjob(a->flags, MNTPNT)
+ && a->dentry
+ && d_mountpoint(a->dentry))
+ pr_warn("mount-point %pd is removed or renamed\n", a->dentry);
+
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
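+/*
+ * find the child dentry of @dir whose name matches, mark its generation
+ * obsolete, and return it with its di-lock write-held (or NULL).
+ */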
+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen,
+ struct inode *dir)
+{
+ struct dentry *dentry, *d, *parent;
+ struct qstr *dname;
+
+ parent = d_find_any_alias(dir);
+ if (!parent)
+ return NULL;
+
+ dentry = NULL;
+ spin_lock(&parent->d_lock);
+ list_for_each_entry(d, &parent->d_subdirs, d_child) {
+ /* AuDbg("%pd\n", d); */
+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ dname = &d->d_name;
+ if (dname->len != nlen || memcmp(dname->name, name, nlen))
+ goto cont_unlock;
+ if (au_di(d))
+ au_digen_dec(d);
+ else
+ goto cont_unlock;
+ if (au_dcount(d) > 0) {
+ dentry = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ break;
+ }
+
+cont_unlock:
+ spin_unlock(&d->d_lock);
+ }
+ spin_unlock(&parent->d_lock);
+ dput(parent);
+
+ if (dentry)
+ di_write_lock_child(dentry);
+
+ return dentry;
+}
+
+static struct inode *lookup_wlock_by_ino(struct super_block *sb,
+ aufs_bindex_t bindex, ino_t h_ino)
+{
+ struct inode *inode;
+ ino_t ino;
+ int err;
+
+ inode = NULL;
+ err = au_xino_read(sb, bindex, h_ino, &ino);
+ if (!err && ino)
+ inode = ilookup(sb, ino);
+ if (!inode)
+ goto out;
+
+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+ pr_warn("wrong root branch\n");
+ iput(inode);
+ inode = NULL;
+ goto out;
+ }
+
+ ii_write_lock_child(inode);
+
+out:
+ return inode;
+}
+
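+/*
+ * the bottom half of hnotify, executed from the aufs workqueue: under
+ * si_write_lock it applies the jobs to the child entry first, and then to
+ * the parent dir.
+ */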
+static void au_hn_bh(void *_args)
+{
+ struct au_hnotify_args *a = _args;
+ struct super_block *sb;
+ aufs_bindex_t bindex, bbot, bfound;
+ unsigned char xino, try_iput;
+ int err;
+ struct inode *inode;
+ ino_t h_ino;
+ struct hn_job_args args;
+ struct dentry *dentry;
+ struct au_sbinfo *sbinfo;
+
+ AuDebugOn(!_args);
+ AuDebugOn(!a->h_dir);
+ AuDebugOn(!a->dir);
+ AuDebugOn(!a->mask);
+ AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n",
+ a->mask, a->dir->i_ino, a->h_dir->i_ino,
+ a->h_child_inode ? a->h_child_inode->i_ino : 0);
+
+ inode = NULL;
+ dentry = NULL;
+ /*
+ * do not lock a->dir->i_mutex here
+ * because d_revalidate() may cause a deadlock.
+ */
+ sb = a->dir->i_sb;
+ AuDebugOn(!sb);
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!sbinfo);
+ si_write_lock(sb, AuLock_NOPLMW);
+
+ if (au_opt_test(sbinfo->si_mntflags, DIRREN))
+ switch (a->mask & FS_EVENTS_POSS_ON_CHILD) {
+ case FS_MOVED_FROM:
+ case FS_MOVED_TO:
+ AuWarn1("DIRREN with UDBA may not work correctly "
+ "for the direct rename(2)\n");
+ }
+
+ ii_read_lock_parent(a->dir);
+ bfound = -1;
+ bbot = au_ibbot(a->dir);
+ for (bindex = au_ibtop(a->dir); bindex <= bbot; bindex++)
+ if (au_h_iptr(a->dir, bindex) == a->h_dir) {
+ bfound = bindex;
+ break;
+ }
+ ii_read_unlock(a->dir);
+ if (unlikely(bfound < 0))
+ goto out;
+
+ xino = !!au_opt_test(au_mntflags(sb), XINO);
+ h_ino = 0;
+ if (a->h_child_inode)
+ h_ino = a->h_child_inode->i_ino;
+
+ if (a->h_child_nlen
+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN)
+ || au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT)))
+ dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen,
+ a->dir);
+ try_iput = 0;
+ if (dentry && d_really_is_positive(dentry))
+ inode = d_inode(dentry);
+ if (xino && !inode && h_ino
+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0)
+ || au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0)
+ || au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) {
+ inode = lookup_wlock_by_ino(sb, bfound, h_ino);
+ try_iput = 1;
+ }
+
+ args.flags = a->flags[AuHn_CHILD];
+ args.dentry = dentry;
+ args.inode = inode;
+ args.h_inode = a->h_child_inode;
+ args.dir = a->dir;
+ args.h_dir = a->h_dir;
+ args.h_name = a->h_child_name;
+ args.h_nlen = a->h_child_nlen;
+ err = hn_job(&args);
+ if (dentry) {
+ if (au_di(dentry))
+ di_write_unlock(dentry);
+ dput(dentry);
+ }
+ if (inode && try_iput) {
+ ii_write_unlock(inode);
+ iput(inode);
+ }
+
+ ii_write_lock_parent(a->dir);
+ args.flags = a->flags[AuHn_PARENT];
+ args.dentry = NULL;
+ args.inode = a->dir;
+ args.h_inode = a->h_dir;
+ args.dir = NULL;
+ args.h_dir = NULL;
+ args.h_name = NULL;
+ args.h_nlen = 0;
+ err = hn_job(&args);
+ ii_write_unlock(a->dir);
+
+out:
+ iput(a->h_child_inode);
+ iput(a->h_dir);
+ iput(a->dir);
+ si_write_unlock(sb);
+ au_nwt_done(&sbinfo->si_nowait);
+ au_kfree_rcu(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+ struct qstr *h_child_qstr, struct inode *h_child_inode)
+{
+ int err, len;
+ unsigned int flags[AuHnLast], f;
+ unsigned char isdir, isroot, wh;
+ struct inode *dir;
+ struct au_hnotify_args *args;
+ char *p, *h_child_name;
+
+ err = 0;
+ AuDebugOn(!hnotify || !hnotify->hn_aufs_inode);
+ dir = igrab(hnotify->hn_aufs_inode);
+ if (!dir)
+ goto out;
+
+ isroot = (dir->i_ino == AUFS_ROOT_INO);
+ wh = 0;
+ h_child_name = (void *)h_child_qstr->name;
+ len = h_child_qstr->len;
+ if (h_child_name) {
+ if (len > AUFS_WH_PFX_LEN
+ && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+ h_child_name += AUFS_WH_PFX_LEN;
+ len -= AUFS_WH_PFX_LEN;
+ wh = 1;
+ }
+ }
+
+ isdir = 0;
+ if (h_child_inode)
+ isdir = !!S_ISDIR(h_child_inode->i_mode);
+ flags[AuHn_PARENT] = AuHnJob_ISDIR;
+ flags[AuHn_CHILD] = 0;
+ if (isdir)
+ flags[AuHn_CHILD] = AuHnJob_ISDIR;
+ au_fset_hnjob(flags[AuHn_PARENT], DIRENT);
+ au_fset_hnjob(flags[AuHn_CHILD], GEN);
+ switch (mask & FS_EVENTS_POSS_ON_CHILD) {
+ case FS_MOVED_FROM:
+ case FS_MOVED_TO:
+ au_fset_hnjob(flags[AuHn_CHILD], XINO0);
+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+ /*FALLTHROUGH*/
+ case FS_CREATE:
+ AuDebugOn(!h_child_name);
+ break;
+
+ case FS_DELETE:
+ /*
+ * aufs will never be able to get this child inode.
+ * revalidation should happen in d_revalidate()
+ * by checking i_nlink, i_generation or d_unhashed().
+ */
+ AuDebugOn(!h_child_name);
+ au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0);
+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+ break;
+
+ default:
+ AuDebugOn(1);
+ }
+
+ if (wh)
+ h_child_inode = NULL;
+
+ err = -ENOMEM;
+ /* iput() and kfree() will be called in au_hn_bh(), or just below on error */
+ args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS);
+ if (unlikely(!args)) {
+ AuErr1("no memory\n");
+ iput(dir);
+ goto out;
+ }
+ args->flags[AuHn_PARENT] = flags[AuHn_PARENT];
+ args->flags[AuHn_CHILD] = flags[AuHn_CHILD];
+ args->mask = mask;
+ args->dir = dir;
+ args->h_dir = igrab(h_dir);
+ if (h_child_inode)
+ h_child_inode = igrab(h_child_inode); /* can be NULL */
+ args->h_child_inode = h_child_inode;
+ args->h_child_nlen = len;
+ if (len) {
+ p = (void *)args;
+ p += sizeof(*args);
+ memcpy(p, h_child_name, len);
+ p[len] = 0;
+ }
+
+ /* NFS fires the event for a silly-renamed file from a kworker */
+ f = 0;
+ if (!dir->i_nlink
+ || (au_test_nfs(h_dir->i_sb) && (mask & FS_DELETE)))
+ f = AuWkq_NEST;
+ err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f);
+ if (unlikely(err)) {
+ pr_err("wkq %d\n", err);
+ iput(args->h_child_inode);
+ iput(args->h_dir);
+ iput(args->dir);
+ au_kfree_rcu(args);
+ }
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+ int err;
+
+ AuDebugOn(!(udba & AuOptMask_UDBA));
+
+ err = 0;
+ if (au_hnotify_op.reset_br)
+ err = au_hnotify_op.reset_br(udba, br, perm);
+
+ return err;
+}
+
+int au_hnotify_init_br(struct au_branch *br, int perm)
+{
+ int err;
+
+ err = 0;
+ if (au_hnotify_op.init_br)
+ err = au_hnotify_op.init_br(br, perm);
+
+ return err;
+}
+
+void au_hnotify_fin_br(struct au_branch *br)
+{
+ if (au_hnotify_op.fin_br)
+ au_hnotify_op.fin_br(br);
+}
+
+static void au_hn_destroy_cache(void)
+{
+ kmem_cache_destroy(au_cache[AuCache_HNOTIFY]);
+ au_cache[AuCache_HNOTIFY] = NULL;
+}
+
+int __init au_hnotify_init(void)
+{
+ int err;
+
+ err = -ENOMEM;
+ au_cache[AuCache_HNOTIFY] = AuCache(au_hnotify);
+ if (au_cache[AuCache_HNOTIFY]) {
+ err = 0;
+ if (au_hnotify_op.init)
+ err = au_hnotify_op.init();
+ if (unlikely(err))
+ au_hn_destroy_cache();
+ }
+ AuTraceErr(err);
+ return err;
+}
+
+void au_hnotify_fin(void)
+{
+ if (au_hnotify_op.fin)
+ au_hnotify_op.fin();
+
+ /* cf. au_cache_fin() */
+ if (au_cache[AuCache_HNOTIFY])
+ au_hn_destroy_cache();
+}
diff --git a/fs/aufs/i_op.c b/fs/aufs/i_op.c
new file mode 100644
index 000000000000..6ead821406f6
--- /dev/null
+++ b/fs/aufs/i_op.c
@@ -0,0 +1,1506 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations (except add/del/rename)
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/fs_stack.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+static int h_permission(struct inode *h_inode, int mask,
+ struct path *h_path, int brperm)
+{
+ int err;
+ const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+
+ err = -EPERM;
+ if (write_mask && IS_IMMUTABLE(h_inode))
+ goto out;
+
+ err = -EACCES;
+ if (((mask & MAY_EXEC)
+ && S_ISREG(h_inode->i_mode)
+ && (path_noexec(h_path)
+ || !(h_inode->i_mode & 0111))))
+ goto out;
+
+ /*
+ * - skip the lower fs test in the case of write to ro branch.
+ * - nfs dir permission write check is optimized, but a policy for
+ * link/rename requires a real check.
+ * - nfs always sets SB_POSIXACL regardless of its mount option 'noacl';
+ * in this case, generic_permission() returns -EOPNOTSUPP.
+ */
+ if ((write_mask && !au_br_writable(brperm))
+ || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode)
+ && write_mask && !(mask & MAY_READ))
+ || !h_inode->i_op->permission) {
+ /* AuLabel(generic_permission); */
+ /* AuDbg("get_acl %ps\n", h_inode->i_op->get_acl); */
+ err = generic_permission(h_inode, mask);
+ if (err == -EOPNOTSUPP && au_test_nfs_noacl(h_inode))
+ err = h_inode->i_op->permission(h_inode, mask);
+ AuTraceErr(err);
+ } else {
+ /* AuLabel(h_inode->permission); */
+ err = h_inode->i_op->permission(h_inode, mask);
+ AuTraceErr(err);
+ }
+
+ if (!err)
+ err = devcgroup_inode_permission(h_inode, mask);
+ if (!err)
+ err = security_inode_permission(h_inode, mask);
+
+#if 0
+ if (!err) {
+ /* todo: do we need to call ima_path_check()? */
+ struct path h_path = {
+ .dentry =
+ .mnt = h_mnt
+ };
+ err = ima_path_check(&h_path,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ IMA_COUNT_LEAVE);
+ }
+#endif
+
+out:
+ return err;
+}
+
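+/*
+ * for a non-dir, a write, or when DIRPERM1 is set, only the top branch is
+ * checked (plus, for a write, that an upper writable branch exists);
+ * a non-write access to a dir is checked on every branch holding that dir.
+ */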
+static int aufs_permission(struct inode *inode, int mask)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ const unsigned char isdir = !!S_ISDIR(inode->i_mode),
+ write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+ struct inode *h_inode;
+ struct super_block *sb;
+ struct au_branch *br;
+
+ /* todo: support rcu-walk? */
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ ii_read_lock_child(inode);
+#if 0
+ err = au_iigen_test(inode, au_sigen(sb));
+ if (unlikely(err))
+ goto out;
+#endif
+
+ if (!isdir
+ || write_mask
+ || au_opt_test(au_mntflags(sb), DIRPERM1)) {
+ err = au_busy_or_stale();
+ h_inode = au_h_iptr(inode, au_ibtop(inode));
+ if (unlikely(!h_inode
+ || (h_inode->i_mode & S_IFMT)
+ != (inode->i_mode & S_IFMT)))
+ goto out;
+
+ err = 0;
+ bindex = au_ibtop(inode);
+ br = au_sbr(sb, bindex);
+ err = h_permission(h_inode, mask, &br->br_path, br->br_perm);
+ if (write_mask
+ && !err
+ && !special_file(h_inode->i_mode)) {
+ /* test whether the upper writable branch exists */
+ err = -EROFS;
+ for (; bindex >= 0; bindex--)
+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
+ err = 0;
+ break;
+ }
+ }
+ goto out;
+ }
+
+ /* non-write to dir */
+ err = 0;
+ bbot = au_ibbot(inode);
+ for (bindex = au_ibtop(inode); !err && bindex <= bbot; bindex++) {
+ h_inode = au_h_iptr(inode, bindex);
+ if (h_inode) {
+ err = au_busy_or_stale();
+ if (unlikely(!S_ISDIR(h_inode->i_mode)))
+ break;
+
+ br = au_sbr(sb, bindex);
+ err = h_permission(h_inode, mask, &br->br_path,
+ br->br_perm);
+ }
+ }
+
+out:
+ ii_read_unlock(inode);
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct dentry *ret, *parent;
+ struct inode *inode;
+ struct super_block *sb;
+ int err, npositive;
+
+ IMustLock(dir);
+
+ /* todo: support rcu-walk? */
+ ret = ERR_PTR(-ECHILD);
+ if (flags & LOOKUP_RCU)
+ goto out;
+
+ ret = ERR_PTR(-ENAMETOOLONG);
+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+ goto out;
+
+ sb = dir->i_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ ret = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ err = au_di_init(dentry);
+ ret = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_si;
+
+ inode = NULL;
+ npositive = 0; /* suppress a warning */
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_alive_dir(parent);
+ if (!err)
+ err = au_digen_test(parent, au_sigen(sb));
+ if (!err) {
+ /* regardless LOOKUP_CREATE, always ALLOW_NEG */
+ npositive = au_lkup_dentry(dentry, au_dbtop(parent),
+ AuLkup_ALLOW_NEG);
+ err = npositive;
+ }
+ di_read_unlock(parent, AuLock_IR);
+ ret = ERR_PTR(err);
+ if (unlikely(err < 0))
+ goto out_unlock;
+
+ if (npositive) {
+ inode = au_new_inode(dentry, /*must_new*/0);
+ if (IS_ERR(inode)) {
+ ret = (void *)inode;
+ inode = NULL;
+ goto out_unlock;
+ }
+ }
+
+ if (inode)
+ atomic_inc(&inode->i_count);
+ ret = d_splice_alias(inode, dentry);
+#if 0
+ if (unlikely(d_need_lookup(dentry))) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+ spin_unlock(&dentry->d_lock);
+ } else
+#endif
+ if (inode) {
+ if (!IS_ERR(ret)) {
+ iput(inode);
+ if (ret && ret != dentry)
+ ii_write_unlock(inode);
+ } else {
+ ii_write_unlock(inode);
+ iput(inode);
+ inode = NULL;
+ }
+ }
+
+out_unlock:
+ di_write_unlock(dentry);
+out_si:
+ si_read_unlock(sb);
+out:
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * very dirty and complicated aufs ->atomic_open().
+ * aufs_atomic_open()
+ * + au_aopen_or_create()
+ * + add_simple()
+ * + vfsub_atomic_open()
+ * + branch fs ->atomic_open()
+ * may call the actual 'open' for h_file
+ * + inc br_nfiles only if opened
+ * + au_aopen_no_open() or au_aopen_do_open()
+ *
+ * au_aopen_do_open()
+ * + finish_open()
+ * + au_do_aopen()
+ * + au_do_open() the body of all 'open'
+ * + au_do_open_nondir()
+ * set the passed h_file
+ *
+ * au_aopen_no_open()
+ * + finish_no_open()
+ */
+
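+/*
+ * aufs_atomic_open() links an aopen_node for the (file, h_file) pair onto
+ * si_aopen, so that au_do_aopen(), called back via finish_open(), can find
+ * the lower file which has already been opened for this struct file.
+ */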
+struct aopen_node {
+ struct hlist_bl_node hblist;
+ struct file *file, *h_file;
+};
+
+static int au_do_aopen(struct inode *inode, struct file *file)
+{
+ struct hlist_bl_head *aopen;
+ struct hlist_bl_node *pos;
+ struct aopen_node *node;
+ struct au_do_open_args args = {
+ .aopen = 1,
+ .open = au_do_open_nondir
+ };
+
+ aopen = &au_sbi(inode->i_sb)->si_aopen;
+ hlist_bl_lock(aopen);
+ hlist_bl_for_each_entry(node, pos, aopen, hblist)
+ if (node->file == file) {
+ args.h_file = node->h_file;
+ break;
+ }
+ hlist_bl_unlock(aopen);
+ /* AuDebugOn(!args.h_file); */
+
+ return au_do_open(file, &args);
+}
+
+static int au_aopen_do_open(struct file *file, struct dentry *dentry,
+ struct aopen_node *aopen_node)
+{
+ int err;
+ struct hlist_bl_head *aopen;
+
+ AuLabel(here);
+ aopen = &au_sbi(dentry->d_sb)->si_aopen;
+ au_hbl_add(&aopen_node->hblist, aopen);
+ err = finish_open(file, dentry, au_do_aopen);
+ au_hbl_del(&aopen_node->hblist, aopen);
+ /* AuDbgFile(file); */
+ AuDbg("%pd%s%s\n", dentry,
+ (file->f_mode & FMODE_CREATED) ? " created" : "",
+ (file->f_mode & FMODE_OPENED) ? " opened" : "");
+
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_aopen_no_open(struct file *file, struct dentry *dentry)
+{
+ int err;
+
+ AuLabel(here);
+ dget(dentry);
+ err = finish_no_open(file, dentry);
+
+ AuTraceErr(err);
+ return err;
+}
+
+static int aufs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned int open_flag,
+ umode_t create_mode)
+{
+ int err, did_open;
+ unsigned int lkup_flags;
+ aufs_bindex_t bindex;
+ struct super_block *sb;
+ struct dentry *parent, *d;
+ struct vfsub_aopen_args args = {
+ .open_flag = open_flag,
+ .create_mode = create_mode
+ };
+ struct aopen_node aopen_node = {
+ .file = file
+ };
+
+ IMustLock(dir);
+ AuDbg("open_flag 0%o\n", open_flag);
+ AuDbgDentry(dentry);
+
+ err = 0;
+ if (!au_di(dentry)) {
+ lkup_flags = LOOKUP_OPEN;
+ if (open_flag & O_CREAT)
+ lkup_flags |= LOOKUP_CREATE;
+ d = aufs_lookup(dir, dentry, lkup_flags);
+ if (IS_ERR(d)) {
+ err = PTR_ERR(d);
+ AuTraceErr(err);
+ goto out;
+ } else if (d) {
+ /*
+ * an obsolete dentry was found;
+ * another error will be returned later.
+ */
+ d_drop(d);
+ AuDbgDentry(d);
+ dput(d);
+ }
+ AuDbgDentry(dentry);
+ }
+
+ if (d_is_positive(dentry)
+ || d_unhashed(dentry)
+ || d_unlinked(dentry)
+ || !(open_flag & O_CREAT)) {
+ err = au_aopen_no_open(file, dentry);
+ goto out; /* success */
+ }
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+ if (unlikely(err))
+ goto out;
+
+ sb = dentry->d_sb;
+ parent = dentry->d_parent; /* dir is locked */
+ di_write_lock_parent(parent);
+ err = au_lkup_dentry(dentry, /*btop*/0, AuLkup_ALLOW_NEG);
+ if (unlikely(err < 0))
+ goto out_parent;
+
+ AuDbgDentry(dentry);
+ if (d_is_positive(dentry)) {
+ err = au_aopen_no_open(file, dentry);
+ goto out_parent; /* success */
+ }
+
+ args.file = alloc_empty_file(file->f_flags, current_cred());
+ err = PTR_ERR(args.file);
+ if (IS_ERR(args.file))
+ goto out_parent;
+
+ bindex = au_dbtop(dentry);
+ err = au_aopen_or_create(dir, dentry, &args);
+ AuTraceErr(err);
+ AuDbgFile(args.file);
+ file->f_mode = args.file->f_mode & ~FMODE_OPENED;
+ did_open = !!(args.file->f_mode & FMODE_OPENED);
+ if (!did_open) {
+ fput(args.file);
+ args.file = NULL;
+ }
+ di_write_unlock(parent);
+ di_write_unlock(dentry);
+ if (unlikely(err < 0)) {
+ if (args.file)
+ fput(args.file);
+ goto out_sb;
+ }
+
+ if (!did_open)
+ err = au_aopen_no_open(file, dentry);
+ else {
+ aopen_node.h_file = args.file;
+ err = au_aopen_do_open(file, dentry, &aopen_node);
+ }
+ if (unlikely(err < 0)) {
+ if (args.file)
+ fput(args.file);
+ if (did_open)
+ au_lcnt_dec(&args.br->br_nfiles);
+ }
+ goto out_sb; /* success */
+
+out_parent:
+ di_write_unlock(parent);
+ di_write_unlock(dentry);
+out_sb:
+ si_read_unlock(sb);
+out:
+ AuTraceErr(err);
+ AuDbgFile(file);
+ return err;
+}
+
+
+/* ---------------------------------------------------------------------- */
+
+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent,
+ const unsigned char add_entry, aufs_bindex_t bcpup,
+ aufs_bindex_t btop)
+{
+ int err;
+ struct dentry *h_parent;
+ struct inode *h_dir;
+
+ if (add_entry)
+ IMustLock(d_inode(parent));
+ else
+ di_write_lock_parent(parent);
+
+ err = 0;
+ if (!au_h_dptr(parent, bcpup)) {
+ if (btop > bcpup)
+ err = au_cpup_dirs(dentry, bcpup);
+ else if (btop < bcpup)
+ err = au_cpdown_dirs(dentry, bcpup);
+ else
+ BUG();
+ }
+ if (!err && add_entry && !au_ftest_wrdir(add_entry, TMPFILE)) {
+ h_parent = au_h_dptr(parent, bcpup);
+ h_dir = d_inode(h_parent);
+ inode_lock_shared_nested(h_dir, AuLsc_I_PARENT);
+ err = au_lkup_neg(dentry, bcpup, /*wh*/0);
+ /* todo: no unlock here */
+ inode_unlock_shared(h_dir);
+
+ AuDbg("bcpup %d\n", bcpup);
+ if (!err) {
+ if (d_really_is_negative(dentry))
+ au_set_h_dptr(dentry, btop, NULL);
+ au_update_dbrange(dentry, /*do_put_zero*/0);
+ }
+ }
+
+ if (!add_entry)
+ di_write_unlock(parent);
+ if (!err)
+ err = bcpup; /* success */
+
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * decide the branch and the parent dir where we will create a new entry.
+ * returns new bindex or an error.
+ * copyup the parent dir if needed.
+ */
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+ struct au_wr_dir_args *args)
+{
+ int err;
+ unsigned int flags;
+ aufs_bindex_t bcpup, btop, src_btop;
+ const unsigned char add_entry
+ = au_ftest_wrdir(args->flags, ADD_ENTRY)
+ | au_ftest_wrdir(args->flags, TMPFILE);
+ struct super_block *sb;
+ struct dentry *parent;
+ struct au_sbinfo *sbinfo;
+
+ sb = dentry->d_sb;
+ sbinfo = au_sbi(sb);
+ parent = dget_parent(dentry);
+ btop = au_dbtop(dentry);
+ bcpup = btop;
+ if (args->force_btgt < 0) {
+ if (src_dentry) {
+ src_btop = au_dbtop(src_dentry);
+ if (src_btop < btop)
+ bcpup = src_btop;
+ } else if (add_entry) {
+ flags = 0;
+ if (au_ftest_wrdir(args->flags, ISDIR))
+ au_fset_wbr(flags, DIR);
+ err = AuWbrCreate(sbinfo, dentry, flags);
+ bcpup = err;
+ }
+
+ if (bcpup < 0 || au_test_ro(sb, bcpup, d_inode(dentry))) {
+ if (add_entry)
+ err = AuWbrCopyup(sbinfo, dentry);
+ else {
+ if (!IS_ROOT(dentry)) {
+ di_read_lock_parent(parent, !AuLock_IR);
+ err = AuWbrCopyup(sbinfo, dentry);
+ di_read_unlock(parent, !AuLock_IR);
+ } else
+ err = AuWbrCopyup(sbinfo, dentry);
+ }
+ bcpup = err;
+ if (unlikely(err < 0))
+ goto out;
+ }
+ } else {
+ bcpup = args->force_btgt;
+ AuDebugOn(au_test_ro(sb, bcpup, d_inode(dentry)));
+ }
+
+ AuDbg("btop %d, bcpup %d\n", btop, bcpup);
+ err = bcpup;
+ if (bcpup == btop)
+ goto out; /* success */
+
+ /* copyup the new parent into the branch we process */
+ err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, btop);
+ if (err >= 0) {
+ if (d_really_is_negative(dentry)) {
+ au_set_h_dptr(dentry, btop, NULL);
+ au_set_dbtop(dentry, bcpup);
+ au_set_dbbot(dentry, bcpup);
+ }
+ AuDebugOn(add_entry
+ && !au_ftest_wrdir(args->flags, TMPFILE)
+ && !au_h_dptr(dentry, bcpup));
+ }
+
+out:
+ dput(parent);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
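+/*
+ * 'pin' keeps the parent stable while aufs operates on an entry:
+ * au_do_pin() takes the parent di-lock, gets write access to the lower mount
+ * when requested, grabs the lower dir inode and locks it via
+ * au_pin_hdir_lock(); au_unpin() releases all of them again.
+ */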
+void au_pin_hdir_unlock(struct au_pin *p)
+{
+ if (p->hdir)
+ au_hn_inode_unlock(p->hdir);
+}
+
+int au_pin_hdir_lock(struct au_pin *p)
+{
+ int err;
+
+ err = 0;
+ if (!p->hdir)
+ goto out;
+
+ /* even if an error happens later, keep this lock */
+ au_hn_inode_lock_nested(p->hdir, p->lsc_hi);
+
+ err = -EBUSY;
+ if (unlikely(p->hdir->hi_inode != d_inode(p->h_parent)))
+ goto out;
+
+ err = 0;
+ if (p->h_dentry)
+ err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode,
+ p->h_parent, p->br);
+
+out:
+ return err;
+}
+
+int au_pin_hdir_relock(struct au_pin *p)
+{
+ int err, i;
+ struct inode *h_i;
+ struct dentry *h_d[] = {
+ p->h_dentry,
+ p->h_parent
+ };
+
+ err = au_pin_hdir_lock(p);
+ if (unlikely(err))
+ goto out;
+
+ for (i = 0; !err && i < sizeof(h_d)/sizeof(*h_d); i++) {
+ if (!h_d[i])
+ continue;
+ if (d_is_positive(h_d[i])) {
+ h_i = d_inode(h_d[i]);
+ err = !h_i->i_nlink;
+ }
+ }
+
+out:
+ return err;
+}
+
+static void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task)
+{
+#if !defined(CONFIG_RWSEM_GENERIC_SPINLOCK) && defined(CONFIG_RWSEM_SPIN_ON_OWNER)
+ p->hdir->hi_inode->i_rwsem.owner = task;
+#endif
+}
+
+void au_pin_hdir_acquire_nest(struct au_pin *p)
+{
+ if (p->hdir) {
+ rwsem_acquire_nest(&p->hdir->hi_inode->i_rwsem.dep_map,
+ p->lsc_hi, 0, NULL, _RET_IP_);
+ au_pin_hdir_set_owner(p, current);
+ }
+}
+
+void au_pin_hdir_release(struct au_pin *p)
+{
+ if (p->hdir) {
+ au_pin_hdir_set_owner(p, p->task);
+ rwsem_release(&p->hdir->hi_inode->i_rwsem.dep_map, 1, _RET_IP_);
+ }
+}
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin)
+{
+ if (pin && pin->parent)
+ return au_h_dptr(pin->parent, pin->bindex);
+ return NULL;
+}
+
+void au_unpin(struct au_pin *p)
+{
+ if (p->hdir)
+ au_pin_hdir_unlock(p);
+ if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE))
+ vfsub_mnt_drop_write(p->h_mnt);
+ if (!p->hdir)
+ return;
+
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_unlock(p->parent, AuLock_IR);
+ iput(p->hdir->hi_inode);
+ dput(p->parent);
+ p->parent = NULL;
+ p->hdir = NULL;
+ p->h_mnt = NULL;
+ /* do not clear p->task */
+}
+
+int au_do_pin(struct au_pin *p)
+{
+ int err;
+ struct super_block *sb;
+ struct inode *h_dir;
+
+ err = 0;
+ sb = p->dentry->d_sb;
+ p->br = au_sbr(sb, p->bindex);
+ if (IS_ROOT(p->dentry)) {
+ if (au_ftest_pin(p->flags, MNT_WRITE)) {
+ p->h_mnt = au_br_mnt(p->br);
+ err = vfsub_mnt_want_write(p->h_mnt);
+ if (unlikely(err)) {
+ au_fclr_pin(p->flags, MNT_WRITE);
+ goto out_err;
+ }
+ }
+ goto out;
+ }
+
+ p->h_dentry = NULL;
+ if (p->bindex <= au_dbbot(p->dentry))
+ p->h_dentry = au_h_dptr(p->dentry, p->bindex);
+
+ p->parent = dget_parent(p->dentry);
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_lock(p->parent, AuLock_IR, p->lsc_di);
+
+ h_dir = NULL;
+ p->h_parent = au_h_dptr(p->parent, p->bindex);
+ p->hdir = au_hi(d_inode(p->parent), p->bindex);
+ if (p->hdir)
+ h_dir = p->hdir->hi_inode;
+
+ /*
+	 * in the udba case, or when DI_LOCKED is not set, p->parent may be
+	 * different and h_parent can be NULL.
+ */
+ if (unlikely(!p->hdir || !h_dir || !p->h_parent)) {
+ err = -EBUSY;
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_unlock(p->parent, AuLock_IR);
+ dput(p->parent);
+ p->parent = NULL;
+ goto out_err;
+ }
+
+ if (au_ftest_pin(p->flags, MNT_WRITE)) {
+ p->h_mnt = au_br_mnt(p->br);
+ err = vfsub_mnt_want_write(p->h_mnt);
+ if (unlikely(err)) {
+ au_fclr_pin(p->flags, MNT_WRITE);
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_unlock(p->parent, AuLock_IR);
+ dput(p->parent);
+ p->parent = NULL;
+ goto out_err;
+ }
+ }
+
+ au_igrab(h_dir);
+ err = au_pin_hdir_lock(p);
+ if (!err)
+ goto out; /* success */
+
+ au_unpin(p);
+
+out_err:
+ pr_err("err %d\n", err);
+ err = au_busy_or_stale();
+out:
+ return err;
+}
+
+void au_pin_init(struct au_pin *p, struct dentry *dentry,
+ aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+ unsigned int udba, unsigned char flags)
+{
+ p->dentry = dentry;
+ p->udba = udba;
+ p->lsc_di = lsc_di;
+ p->lsc_hi = lsc_hi;
+ p->flags = flags;
+ p->bindex = bindex;
+
+ p->parent = NULL;
+ p->hdir = NULL;
+ p->h_mnt = NULL;
+
+ p->h_dentry = NULL;
+ p->h_parent = NULL;
+ p->br = NULL;
+ p->task = current;
+}
+
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int udba, unsigned char flags)
+{
+ au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2,
+ udba, flags);
+ return au_do_pin(pin);
+}
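+
+/*
+ * usage sketch, added for this listing: the pin API is used in a begin/end
+ * pattern around operations on the pinned branch, as the add/del paths later
+ * in this patch do, e.g.
+ *
+ *   struct au_pin pin;
+ *
+ *   err = au_pin(&pin, dentry, bcpup, au_opt_udba(sb),
+ *                AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ *   if (unlikely(err))
+ *     return err;
+ *   // ... create/unlink under au_pinned_h_dir(&pin) ...
+ *   au_unpin(&pin);
+ */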
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * ->setattr() and ->getattr() are called in various cases.
+ * chmod, stat: dentry is revalidated.
+ * fchmod, fstat: file and dentry are not revalidated; additionally, they may
+ * be unhashed.
+ * for ->setattr(), ia->ia_file is passed from ftruncate only.
+ */
+/* todo: consolidate with do_refresh() and simple_reval_dpath() */
+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+ struct dentry *parent;
+
+ err = 0;
+ if (au_digen_test(dentry, sigen)) {
+ parent = dget_parent(dentry);
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_refresh_dentry(dentry, parent);
+ di_read_unlock(parent, AuLock_IR);
+ dput(parent);
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+ struct au_icpup_args *a)
+{
+ int err;
+ loff_t sz;
+ aufs_bindex_t btop, ibtop;
+ struct dentry *hi_wh, *parent;
+ struct inode *inode;
+ struct au_wr_dir_args wr_dir_args = {
+ .force_btgt = -1,
+ .flags = 0
+ };
+
+ if (d_is_dir(dentry))
+ au_fset_wrdir(wr_dir_args.flags, ISDIR);
+ /* plink or hi_wh() case */
+ btop = au_dbtop(dentry);
+ inode = d_inode(dentry);
+ ibtop = au_ibtop(inode);
+ if (btop != ibtop && !au_test_ro(inode->i_sb, ibtop, inode))
+ wr_dir_args.force_btgt = ibtop;
+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+ if (unlikely(err < 0))
+ goto out;
+ a->btgt = err;
+ if (err != btop)
+ au_fset_icpup(a->flags, DID_CPUP);
+
+ err = 0;
+ a->pin_flags = AuPin_MNT_WRITE;
+ parent = NULL;
+ if (!IS_ROOT(dentry)) {
+ au_fset_pin(a->pin_flags, DI_LOCKED);
+ parent = dget_parent(dentry);
+ di_write_lock_parent(parent);
+ }
+
+ err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags);
+ if (unlikely(err))
+ goto out_parent;
+
+ sz = -1;
+ a->h_path.dentry = au_h_dptr(dentry, btop);
+ a->h_inode = d_inode(a->h_path.dentry);
+ if (ia && (ia->ia_valid & ATTR_SIZE)) {
+ inode_lock_shared_nested(a->h_inode, AuLsc_I_CHILD);
+ if (ia->ia_size < i_size_read(a->h_inode))
+ sz = ia->ia_size;
+ inode_unlock_shared(a->h_inode);
+ }
+
+ hi_wh = NULL;
+ if (au_ftest_icpup(a->flags, DID_CPUP) && d_unlinked(dentry)) {
+ hi_wh = au_hi_wh(inode, a->btgt);
+ if (!hi_wh) {
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = a->btgt,
+ .bsrc = -1,
+ .len = sz,
+ .pin = &a->pin
+ };
+ err = au_sio_cpup_wh(&cpg, /*file*/NULL);
+ if (unlikely(err))
+ goto out_unlock;
+ hi_wh = au_hi_wh(inode, a->btgt);
+ /* todo: revalidate hi_wh? */
+ }
+ }
+
+ if (parent) {
+ au_pin_set_parent_lflag(&a->pin, /*lflag*/0);
+ di_downgrade_lock(parent, AuLock_IR);
+ dput(parent);
+ parent = NULL;
+ }
+ if (!au_ftest_icpup(a->flags, DID_CPUP))
+ goto out; /* success */
+
+ if (!d_unhashed(dentry)) {
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = a->btgt,
+ .bsrc = btop,
+ .len = sz,
+ .pin = &a->pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+ err = au_sio_cpup_simple(&cpg);
+ if (!err)
+ a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+ } else if (!hi_wh)
+ a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+ else
+ a->h_path.dentry = hi_wh; /* do not dget here */
+
+out_unlock:
+ a->h_inode = d_inode(a->h_path.dentry);
+ if (!err)
+ goto out; /* success */
+ au_unpin(&a->pin);
+out_parent:
+ if (parent) {
+ di_write_unlock(parent);
+ dput(parent);
+ }
+out:
+ if (!err)
+ inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+ return err;
+}
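+
+/*
+ * note added for this listing: on success au_pin_and_icpup() returns with
+ * a->pin held and a->h_inode locked (AuLsc_I_CHILD); the callers below,
+ * aufs_setattr() and au_sxattr(), drop that inode lock themselves and then
+ * call au_unpin().
+ */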
+
+static int aufs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+ int err;
+ struct inode *inode, *delegated;
+ struct super_block *sb;
+ struct file *file;
+ struct au_icpup_args *a;
+
+ inode = d_inode(dentry);
+ IMustLock(inode);
+
+ err = setattr_prepare(dentry, ia);
+ if (unlikely(err))
+ goto out;
+
+ err = -ENOMEM;
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+ ia->ia_valid &= ~ATTR_MODE;
+
+ file = NULL;
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_kfree;
+
+ if (ia->ia_valid & ATTR_FILE) {
+ /* currently ftruncate(2) only */
+ AuDebugOn(!d_is_reg(dentry));
+ file = ia->ia_file;
+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1,
+ /*fi_lsc*/0);
+ if (unlikely(err))
+ goto out_si;
+ ia->ia_file = au_hf_top(file);
+ a->udba = AuOpt_UDBA_NONE;
+ } else {
+ /* fchmod() doesn't pass ia_file */
+ a->udba = au_opt_udba(sb);
+ di_write_lock_child(dentry);
+ /* no d_unlinked(), to set UDBA_NONE for root */
+ if (d_unhashed(dentry))
+ a->udba = AuOpt_UDBA_NONE;
+ if (a->udba != AuOpt_UDBA_NONE) {
+ AuDebugOn(IS_ROOT(dentry));
+ err = au_reval_for_attr(dentry, au_sigen(sb));
+ if (unlikely(err))
+ goto out_dentry;
+ }
+ }
+
+ err = au_pin_and_icpup(dentry, ia, a);
+ if (unlikely(err < 0))
+ goto out_dentry;
+ if (au_ftest_icpup(a->flags, DID_CPUP)) {
+ ia->ia_file = NULL;
+ ia->ia_valid &= ~ATTR_FILE;
+ }
+
+ a->h_path.mnt = au_sbr_mnt(sb, a->btgt);
+ if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME))
+ == (ATTR_MODE | ATTR_CTIME)) {
+ err = security_path_chmod(&a->h_path, ia->ia_mode);
+ if (unlikely(err))
+ goto out_unlock;
+ } else if ((ia->ia_valid & (ATTR_UID | ATTR_GID))
+ && (ia->ia_valid & ATTR_CTIME)) {
+ err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ if (ia->ia_valid & ATTR_SIZE) {
+ struct file *f;
+
+ if (ia->ia_size < i_size_read(inode))
+ /* unmap only */
+ truncate_setsize(inode, ia->ia_size);
+
+ f = NULL;
+ if (ia->ia_valid & ATTR_FILE)
+ f = ia->ia_file;
+ inode_unlock(a->h_inode);
+ err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f);
+ inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+ } else {
+ delegated = NULL;
+ while (1) {
+ err = vfsub_notify_change(&a->h_path, ia, &delegated);
+ if (delegated) {
+ err = break_deleg_wait(&delegated);
+ if (!err)
+ continue;
+ }
+ break;
+ }
+ }
+ /*
+	 * regardless of the aufs 'acl' option setting.
+ * why don't all acl-aware fs call this func from their ->setattr()?
+ */
+ if (!err && (ia->ia_valid & ATTR_MODE))
+ err = vfsub_acl_chmod(a->h_inode, ia->ia_mode);
+ if (!err)
+ au_cpup_attr_changeable(inode);
+
+out_unlock:
+ inode_unlock(a->h_inode);
+ au_unpin(&a->pin);
+ if (unlikely(err))
+ au_update_dbtop(dentry);
+out_dentry:
+ di_write_unlock(dentry);
+ if (file) {
+ fi_write_unlock(file);
+ ia->ia_file = file;
+ ia->ia_valid |= ATTR_FILE;
+ }
+out_si:
+ si_read_unlock(sb);
+out_kfree:
+ au_kfree_rcu(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
+static int au_h_path_to_set_attr(struct dentry *dentry,
+ struct au_icpup_args *a, struct path *h_path)
+{
+ int err;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ a->udba = au_opt_udba(sb);
+ /* no d_unlinked(), to set UDBA_NONE for root */
+ if (d_unhashed(dentry))
+ a->udba = AuOpt_UDBA_NONE;
+ if (a->udba != AuOpt_UDBA_NONE) {
+ AuDebugOn(IS_ROOT(dentry));
+ err = au_reval_for_attr(dentry, au_sigen(sb));
+ if (unlikely(err))
+ goto out;
+ }
+ err = au_pin_and_icpup(dentry, /*ia*/NULL, a);
+ if (unlikely(err < 0))
+ goto out;
+
+ h_path->dentry = a->h_path.dentry;
+ h_path->mnt = au_sbr_mnt(sb, a->btgt);
+
+out:
+ return err;
+}
+
+ssize_t au_sxattr(struct dentry *dentry, struct inode *inode,
+ struct au_sxattr *arg)
+{
+ int err;
+ struct path h_path;
+ struct super_block *sb;
+ struct au_icpup_args *a;
+ struct inode *h_inode;
+
+ IMustLock(inode);
+
+ err = -ENOMEM;
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_kfree;
+
+ h_path.dentry = NULL; /* silence gcc */
+ di_write_lock_child(dentry);
+ err = au_h_path_to_set_attr(dentry, a, &h_path);
+ if (unlikely(err))
+ goto out_di;
+
+ inode_unlock(a->h_inode);
+ switch (arg->type) {
+ case AU_XATTR_SET:
+ AuDebugOn(d_is_negative(h_path.dentry));
+ err = vfsub_setxattr(h_path.dentry,
+ arg->u.set.name, arg->u.set.value,
+ arg->u.set.size, arg->u.set.flags);
+ break;
+ case AU_ACL_SET:
+ err = -EOPNOTSUPP;
+ h_inode = d_inode(h_path.dentry);
+ if (h_inode->i_op->set_acl)
+ /* this will call posix_acl_update_mode */
+ err = h_inode->i_op->set_acl(h_inode,
+ arg->u.acl_set.acl,
+ arg->u.acl_set.type);
+ break;
+ }
+ if (!err)
+ au_cpup_attr_timesizes(inode);
+
+ au_unpin(&a->pin);
+ if (unlikely(err))
+ au_update_dbtop(dentry);
+
+out_di:
+ di_write_unlock(dentry);
+ si_read_unlock(sb);
+out_kfree:
+ au_kfree_rcu(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+#endif
+
+static void au_refresh_iattr(struct inode *inode, struct kstat *st,
+ unsigned int nlink)
+{
+ unsigned int n;
+
+ inode->i_mode = st->mode;
+ /* don't i_[ug]id_write() here */
+ inode->i_uid = st->uid;
+ inode->i_gid = st->gid;
+ inode->i_atime = st->atime;
+ inode->i_mtime = st->mtime;
+ inode->i_ctime = st->ctime;
+
+ au_cpup_attr_nlink(inode, /*force*/0);
+ if (S_ISDIR(inode->i_mode)) {
+ n = inode->i_nlink;
+ n -= nlink;
+ n += st->nlink;
+ smp_mb(); /* for i_nlink */
+ /* 0 can happen */
+ set_nlink(inode, n);
+ }
+
+ spin_lock(&inode->i_lock);
+ inode->i_blocks = st->blocks;
+ i_size_write(inode, st->size);
+ spin_unlock(&inode->i_lock);
+}
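+
+/*
+ * worked example, added for this listing: for a directory the arithmetic
+ * above swaps the value passed in @nlink for the freshly fetched st->nlink.
+ * With i_nlink == 5, @nlink == 3 and st->nlink == 4, the result is
+ * 5 - 3 + 4 == 6.
+ */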
+
+/*
+ * common routine for aufs_getattr() and au_getxattr().
+ * returns zero or negative (an error).
+ * @dentry will be read-locked in success.
+ */
+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path,
+ int locked)
+{
+ int err;
+ unsigned int mnt_flags, sigen;
+ unsigned char udba_none;
+ aufs_bindex_t bindex;
+ struct super_block *sb, *h_sb;
+ struct inode *inode;
+
+ h_path->mnt = NULL;
+ h_path->dentry = NULL;
+
+ err = 0;
+ sb = dentry->d_sb;
+ mnt_flags = au_mntflags(sb);
+ udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
+
+ if (unlikely(locked))
+ goto body; /* skip locking dinfo */
+
+ /* support fstat(2) */
+ if (!d_unlinked(dentry) && !udba_none) {
+ sigen = au_sigen(sb);
+ err = au_digen_test(dentry, sigen);
+ if (!err) {
+ di_read_lock_child(dentry, AuLock_IR);
+ err = au_dbrange_test(dentry);
+ if (unlikely(err)) {
+ di_read_unlock(dentry, AuLock_IR);
+ goto out;
+ }
+ } else {
+ AuDebugOn(IS_ROOT(dentry));
+ di_write_lock_child(dentry);
+ err = au_dbrange_test(dentry);
+ if (!err)
+ err = au_reval_for_attr(dentry, sigen);
+ if (!err)
+ di_downgrade_lock(dentry, AuLock_IR);
+ else {
+ di_write_unlock(dentry);
+ goto out;
+ }
+ }
+ } else
+ di_read_lock_child(dentry, AuLock_IR);
+
+body:
+ inode = d_inode(dentry);
+ bindex = au_ibtop(inode);
+ h_path->mnt = au_sbr_mnt(sb, bindex);
+ h_sb = h_path->mnt->mnt_sb;
+ if (!force
+ && !au_test_fs_bad_iattr(h_sb)
+ && udba_none)
+ goto out; /* success */
+
+ if (au_dbtop(dentry) == bindex)
+ h_path->dentry = au_h_dptr(dentry, bindex);
+ else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) {
+ h_path->dentry = au_plink_lkup(inode, bindex);
+ if (IS_ERR(h_path->dentry))
+ /* pretending success */
+ h_path->dentry = NULL;
+ else
+ dput(h_path->dentry);
+ }
+
+out:
+ return err;
+}
+
+static int aufs_getattr(const struct path *path, struct kstat *st,
+ u32 request, unsigned int query)
+{
+ int err;
+ unsigned char positive;
+ struct path h_path;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct super_block *sb;
+
+ dentry = path->dentry;
+ inode = d_inode(dentry);
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out;
+ err = au_h_path_getattr(dentry, /*force*/0, &h_path, /*locked*/0);
+ if (unlikely(err))
+ goto out_si;
+ if (unlikely(!h_path.dentry))
+ /* illegally overlapped or something */
+ goto out_fill; /* pretending success */
+
+ positive = d_is_positive(h_path.dentry);
+ if (positive)
+ /* no vfsub version */
+ err = vfs_getattr(&h_path, st, request, query);
+ if (!err) {
+ if (positive)
+ au_refresh_iattr(inode, st,
+ d_inode(h_path.dentry)->i_nlink);
+ goto out_fill; /* success */
+ }
+ AuTraceErr(err);
+ goto out_di;
+
+out_fill:
+ generic_fillattr(inode, st);
+out_di:
+ di_read_unlock(dentry, AuLock_IR);
+out_si:
+ si_read_unlock(sb);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static const char *aufs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ const char *ret;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ int err;
+ aufs_bindex_t bindex;
+
+ ret = NULL; /* suppress a warning */
+ err = -ECHILD;
+ if (!dentry)
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
+ if (unlikely(err))
+ goto out;
+
+ err = au_d_hashed_positive(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+
+ err = -EINVAL;
+ inode = d_inode(dentry);
+ bindex = au_ibtop(inode);
+ h_inode = au_h_iptr(inode, bindex);
+ if (unlikely(!h_inode->i_op->get_link))
+ goto out_unlock;
+
+ err = -EBUSY;
+ h_dentry = NULL;
+ if (au_dbtop(dentry) <= bindex) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry)
+ dget(h_dentry);
+ }
+ if (!h_dentry) {
+ h_dentry = d_find_any_alias(h_inode);
+ if (IS_ERR(h_dentry)) {
+ err = PTR_ERR(h_dentry);
+ goto out_unlock;
+ }
+ }
+ if (unlikely(!h_dentry))
+ goto out_unlock;
+
+ err = 0;
+ AuDbg("%ps\n", h_inode->i_op->get_link);
+ AuDbgDentry(h_dentry);
+ ret = vfs_get_link(h_dentry, done);
+ dput(h_dentry);
+ if (IS_ERR(ret))
+ err = PTR_ERR(ret);
+
+out_unlock:
+ aufs_read_unlock(dentry, AuLock_IR);
+out:
+ if (unlikely(err))
+ ret = ERR_PTR(err);
+ AuTraceErrPtr(ret);
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_is_special(struct inode *inode)
+{
+ return (inode->i_mode & (S_IFBLK | S_IFCHR | S_IFIFO | S_IFSOCK));
+}
+
+static int aufs_update_time(struct inode *inode, struct timespec64 *ts,
+ int flags)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct super_block *sb;
+ struct inode *h_inode;
+ struct vfsmount *h_mnt;
+
+ sb = inode->i_sb;
+ WARN_ONCE((flags & S_ATIME) && !IS_NOATIME(inode),
+ "unexpected s_flags 0x%lx", sb->s_flags);
+
+ /* mmap_sem might be acquired already, cf. aufs_mmap() */
+ lockdep_off();
+ si_read_lock(sb, AuLock_FLUSH);
+ ii_write_lock_child(inode);
+
+ err = 0;
+ bindex = au_ibtop(inode);
+ h_inode = au_h_iptr(inode, bindex);
+ if (!au_test_ro(sb, bindex, inode)) {
+ h_mnt = au_sbr_mnt(sb, bindex);
+ err = vfsub_mnt_want_write(h_mnt);
+ if (!err) {
+ err = vfsub_update_time(h_inode, ts, flags);
+ vfsub_mnt_drop_write(h_mnt);
+ }
+ } else if (au_is_special(h_inode)) {
+ /*
+ * Never copy-up here.
+		 * These special files may already be open and in use for
+		 * communication. If we copied them up, the communication
+		 * would be corrupted.
+ */
+ AuWarn1("timestamps for i%lu are ignored "
+ "since it is on readonly branch (hi%lu).\n",
+ inode->i_ino, h_inode->i_ino);
+ } else if (flags & ~S_ATIME) {
+ err = -EIO;
+ AuIOErr1("unexpected flags 0x%x\n", flags);
+ AuDebugOn(1);
+ }
+
+ if (!err)
+ au_cpup_attr_timesizes(inode);
+ ii_write_unlock(inode);
+ si_read_unlock(sb);
+ lockdep_on();
+
+ if (!err && (flags & S_VERSION))
+ inode_inc_iversion(inode);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no getattr version will be set by module.c:aufs_init() */
+struct inode_operations aufs_iop_nogetattr[AuIop_Last],
+ aufs_iop[] = {
+ [AuIop_SYMLINK] = {
+ .permission = aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+ .get_acl = aufs_get_acl,
+		.set_acl	= aufs_set_acl, /* unsupported for symlinks? */
+#endif
+
+ .setattr = aufs_setattr,
+ .getattr = aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+ .listxattr = aufs_listxattr,
+#endif
+
+ .get_link = aufs_get_link,
+
+ /* .update_time = aufs_update_time */
+ },
+ [AuIop_DIR] = {
+ .create = aufs_create,
+ .lookup = aufs_lookup,
+ .link = aufs_link,
+ .unlink = aufs_unlink,
+ .symlink = aufs_symlink,
+ .mkdir = aufs_mkdir,
+ .rmdir = aufs_rmdir,
+ .mknod = aufs_mknod,
+ .rename = aufs_rename,
+
+ .permission = aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+ .get_acl = aufs_get_acl,
+ .set_acl = aufs_set_acl,
+#endif
+
+ .setattr = aufs_setattr,
+ .getattr = aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+ .listxattr = aufs_listxattr,
+#endif
+
+ .update_time = aufs_update_time,
+ .atomic_open = aufs_atomic_open,
+ .tmpfile = aufs_tmpfile
+ },
+ [AuIop_OTHER] = {
+ .permission = aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+ .get_acl = aufs_get_acl,
+ .set_acl = aufs_set_acl,
+#endif
+
+ .setattr = aufs_setattr,
+ .getattr = aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+ .listxattr = aufs_listxattr,
+#endif
+
+ .update_time = aufs_update_time
+ }
+};
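+
+/*
+ * note added for this listing: aufs_iop_nogetattr[] is only declared here.
+ * Going by the comment above the arrays, aufs_init() in module.c presumably
+ * clones aufs_iop[] into it with .getattr left unset; a sketch of that
+ * assumption (module.c is not shown in this hunk):
+ *
+ *   for (i = 0; i < AuIop_Last; i++) {
+ *     aufs_iop_nogetattr[i] = aufs_iop[i];
+ *     aufs_iop_nogetattr[i].getattr = NULL;
+ *   }
+ */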
diff --git a/fs/aufs/i_op_add.c b/fs/aufs/i_op_add.c
new file mode 100644
index 000000000000..73002d691e9d
--- /dev/null
+++ b/fs/aufs/i_op_add.c
@@ -0,0 +1,935 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations (add entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * final procedure of adding a new entry, except link(2).
+ * remove whiteout, instantiate, copyup the parent dir's times and size
+ * and update version.
+ * if it fails, re-create the removed whiteout.
+ */
+static int epilog(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct dentry *dentry)
+{
+ int err, rerr;
+ aufs_bindex_t bwh;
+ struct path h_path;
+ struct super_block *sb;
+ struct inode *inode, *h_dir;
+ struct dentry *wh;
+
+ bwh = -1;
+ sb = dir->i_sb;
+ if (wh_dentry) {
+ h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
+ IMustLock(h_dir);
+ AuDebugOn(au_h_iptr(dir, bindex) != h_dir);
+ bwh = au_dbwh(dentry);
+ h_path.dentry = wh_dentry;
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path,
+ dentry);
+ if (unlikely(err))
+ goto out;
+ }
+
+ inode = au_new_inode(dentry, /*must_new*/1);
+ if (!IS_ERR(inode)) {
+ d_instantiate(dentry, inode);
+ dir = d_inode(dentry->d_parent); /* dir inode is locked */
+ IMustLock(dir);
+ au_dir_ts(dir, bindex);
+ inode_inc_iversion(dir);
+ au_fhsm_wrote(sb, bindex, /*force*/0);
+ return 0; /* success */
+ }
+
+ err = PTR_ERR(inode);
+ if (!wh_dentry)
+ goto out;
+
+ /* revert */
+ /* dir inode is locked */
+ wh = au_wh_create(dentry, bwh, wh_dentry->d_parent);
+ rerr = PTR_ERR(wh);
+ if (IS_ERR(wh)) {
+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ } else
+ dput(wh);
+
+out:
+ return err;
+}
+
+static int au_d_may_add(struct dentry *dentry)
+{
+ int err;
+
+ err = 0;
+ if (unlikely(d_unhashed(dentry)))
+ err = -ENOENT;
+ if (unlikely(d_really_is_positive(dentry)))
+ err = -EEXIST;
+ return err;
+}
+
+/*
+ * simple tests for the adding inode operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir)
+{
+ int err;
+ umode_t h_mode;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ err = -ENAMETOOLONG;
+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+ goto out;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (d_really_is_negative(dentry)) {
+ err = -EEXIST;
+ if (unlikely(d_is_positive(h_dentry)))
+ goto out;
+ } else {
+ /* rename(2) case */
+ err = -EIO;
+ if (unlikely(d_is_negative(h_dentry)))
+ goto out;
+ h_inode = d_inode(h_dentry);
+ if (unlikely(!h_inode->i_nlink))
+ goto out;
+
+ h_mode = h_inode->i_mode;
+ if (!isdir) {
+ err = -EISDIR;
+ if (unlikely(S_ISDIR(h_mode)))
+ goto out;
+ } else if (unlikely(!S_ISDIR(h_mode))) {
+ err = -ENOTDIR;
+ goto out;
+ }
+ }
+
+ err = 0;
+ /* expected parent dir is locked */
+ if (unlikely(h_parent != h_dentry->d_parent))
+ err = -EIO;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * initial procedure of adding a new entry.
+ * prepare writable branch and the parent dir, lock it,
+ * and lookup whiteout for the new entry.
+ */
+static struct dentry*
+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
+ struct dentry *src_dentry, struct au_pin *pin,
+ struct au_wr_dir_args *wr_dir_args)
+{
+ struct dentry *wh_dentry, *h_parent;
+ struct super_block *sb;
+ struct au_branch *br;
+ int err;
+ unsigned int udba;
+ aufs_bindex_t bcpup;
+
+ AuDbg("%pd\n", dentry);
+
+ err = au_wr_dir(dentry, src_dentry, wr_dir_args);
+ bcpup = err;
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err < 0))
+ goto out;
+
+ sb = dentry->d_sb;
+ udba = au_opt_udba(sb);
+ err = au_pin(pin, dentry, bcpup, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ h_parent = au_pinned_h_parent(pin);
+ if (udba != AuOpt_UDBA_NONE
+ && au_dbtop(dentry) == bcpup)
+ err = au_may_add(dentry, bcpup, h_parent,
+ au_ftest_wrdir(wr_dir_args->flags, ISDIR));
+ else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+ err = -ENAMETOOLONG;
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_unpin;
+
+ br = au_sbr(sb, bcpup);
+ if (dt) {
+ struct path tmp = {
+ .dentry = h_parent,
+ .mnt = au_br_mnt(br)
+ };
+ au_dtime_store(dt, au_pinned_parent(pin), &tmp);
+ }
+
+ wh_dentry = NULL;
+ if (bcpup != au_dbwh(dentry))
+ goto out; /* success */
+
+ /*
+	 * ENAMETOOLONG here means that if we allowed creating such a name, it
+	 * could not be removed in the future. So we don't allow such a name
+	 * here and we don't handle ENAMETOOLONG differently here.
+ */
+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+
+out_unpin:
+ if (IS_ERR(wh_dentry))
+ au_unpin(pin);
+out:
+ return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+enum { Mknod, Symlink, Creat };
+struct simple_arg {
+ int type;
+ union {
+ struct {
+ umode_t mode;
+ bool want_excl;
+ bool try_aopen;
+ struct vfsub_aopen_args *aopen;
+ } c;
+ struct {
+ const char *symname;
+ } s;
+ struct {
+ umode_t mode;
+ dev_t dev;
+ } m;
+ } u;
+};
+
+static int add_simple(struct inode *dir, struct dentry *dentry,
+ struct simple_arg *arg)
+{
+ int err, rerr;
+ aufs_bindex_t btop;
+ unsigned char created;
+ const unsigned char try_aopen
+ = (arg->type == Creat && arg->u.c.try_aopen);
+ struct vfsub_aopen_args *aopen = arg->u.c.aopen;
+ struct dentry *wh_dentry, *parent;
+ struct inode *h_dir;
+ struct super_block *sb;
+ struct au_branch *br;
+ /* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct au_pin pin;
+ struct path h_path;
+ struct au_wr_dir_args wr_dir_args;
+ } *a;
+
+ AuDbg("%pd\n", dentry);
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+ a->wr_dir_args.force_btgt = -1;
+ a->wr_dir_args.flags = AuWrDir_ADD_ENTRY;
+
+ parent = dentry->d_parent; /* dir inode is locked */
+ if (!try_aopen) {
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ }
+ err = au_d_may_add(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ if (!try_aopen)
+ di_write_lock_parent(parent);
+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+ &a->pin, &a->wr_dir_args);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ btop = au_dbtop(dentry);
+ sb = dentry->d_sb;
+ br = au_sbr(sb, btop);
+ a->h_path.dentry = au_h_dptr(dentry, btop);
+ a->h_path.mnt = au_br_mnt(br);
+ h_dir = au_pinned_h_dir(&a->pin);
+ switch (arg->type) {
+ case Creat:
+ if (!try_aopen || !h_dir->i_op->atomic_open) {
+ err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode,
+ arg->u.c.want_excl);
+ created = !err;
+ if (!err && try_aopen)
+ aopen->file->f_mode |= FMODE_CREATED;
+ } else {
+ aopen->br = br;
+ err = vfsub_atomic_open(h_dir, a->h_path.dentry, aopen);
+ AuDbg("err %d\n", err);
+ AuDbgFile(aopen->file);
+ created = err >= 0
+ && !!(aopen->file->f_mode & FMODE_CREATED);
+ }
+ break;
+ case Symlink:
+ err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname);
+ created = !err;
+ break;
+ case Mknod:
+ err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode,
+ arg->u.m.dev);
+ created = !err;
+ break;
+ default:
+ BUG();
+ }
+ if (unlikely(err < 0))
+ goto out_unpin;
+
+ err = epilog(dir, btop, wh_dentry, dentry);
+ if (!err)
+ goto out_unpin; /* success */
+
+ /* revert */
+ if (created /* && d_is_positive(a->h_path.dentry) */) {
+ /* no delegation since it is just created */
+ rerr = vfsub_unlink(h_dir, &a->h_path, /*delegated*/NULL,
+ /*force*/0);
+ if (rerr) {
+ AuIOErr("%pd revert failure(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ }
+ au_dtime_revert(&a->dt);
+ }
+ if (try_aopen && h_dir->i_op->atomic_open
+ && (aopen->file->f_mode & FMODE_OPENED))
+ /* aopen->file is still opened */
+ au_lcnt_dec(&aopen->br->br_nfiles);
+
+out_unpin:
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+out_parent:
+ if (!try_aopen)
+ di_write_unlock(parent);
+out_unlock:
+ if (unlikely(err)) {
+ au_update_dbtop(dentry);
+ d_drop(dentry);
+ }
+ if (!try_aopen)
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ au_kfree_rcu(a);
+out:
+ return err;
+}
+
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t dev)
+{
+ struct simple_arg arg = {
+ .type = Mknod,
+ .u.m = {
+ .mode = mode,
+ .dev = dev
+ }
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+ struct simple_arg arg = {
+ .type = Symlink,
+ .u.s.symname = symname
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool want_excl)
+{
+ struct simple_arg arg = {
+ .type = Creat,
+ .u.c = {
+ .mode = mode,
+ .want_excl = want_excl
+ }
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *aopen_args)
+{
+ struct simple_arg arg = {
+ .type = Creat,
+ .u.c = {
+ .mode = aopen_args->create_mode,
+ .want_excl = aopen_args->open_flag & O_EXCL,
+ .try_aopen = true,
+ .aopen = aopen_args
+ }
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct super_block *sb;
+ struct dentry *parent, *h_parent, *h_dentry;
+ struct inode *h_dir, *inode;
+ struct vfsmount *h_mnt;
+ struct au_wr_dir_args wr_dir_args = {
+ .force_btgt = -1,
+ .flags = AuWrDir_TMPFILE
+ };
+
+ /* copy-up may happen */
+ inode_lock(dir);
+
+ sb = dir->i_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out;
+
+ err = au_di_init(dentry);
+ if (unlikely(err))
+ goto out_si;
+
+ err = -EBUSY;
+ parent = d_find_any_alias(dir);
+ AuDebugOn(!parent);
+ di_write_lock_parent(parent);
+ if (unlikely(d_inode(parent) != dir))
+ goto out_parent;
+
+ err = au_digen_test(parent, au_sigen(sb));
+ if (unlikely(err))
+ goto out_parent;
+
+ bindex = au_dbtop(parent);
+ au_set_dbtop(dentry, bindex);
+ au_set_dbbot(dentry, bindex);
+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+ bindex = err;
+ if (unlikely(err < 0))
+ goto out_parent;
+
+ err = -EOPNOTSUPP;
+ h_dir = au_h_iptr(dir, bindex);
+ if (unlikely(!h_dir->i_op->tmpfile))
+ goto out_parent;
+
+ h_mnt = au_sbr_mnt(sb, bindex);
+ err = vfsub_mnt_want_write(h_mnt);
+ if (unlikely(err))
+ goto out_parent;
+
+ h_parent = au_h_dptr(parent, bindex);
+ h_dentry = vfs_tmpfile(h_parent, mode, /*open_flag*/0);
+ if (IS_ERR(h_dentry)) {
+ err = PTR_ERR(h_dentry);
+ goto out_mnt;
+ }
+
+ au_set_dbtop(dentry, bindex);
+ au_set_dbbot(dentry, bindex);
+ au_set_h_dptr(dentry, bindex, dget(h_dentry));
+ inode = au_new_inode(dentry, /*must_new*/1);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ au_set_h_dptr(dentry, bindex, NULL);
+ au_set_dbtop(dentry, -1);
+ au_set_dbbot(dentry, -1);
+ } else {
+ if (!inode->i_nlink)
+ set_nlink(inode, 1);
+ d_tmpfile(dentry, inode);
+ au_di(dentry)->di_tmpfile = 1;
+
+ /* update without i_mutex */
+ if (au_ibtop(dir) == au_dbtop(dentry))
+ au_cpup_attr_timesizes(dir);
+ }
+ dput(h_dentry);
+
+out_mnt:
+ vfsub_mnt_drop_write(h_mnt);
+out_parent:
+ di_write_unlock(parent);
+ dput(parent);
+ di_write_unlock(dentry);
+ if (unlikely(err)) {
+ au_di_fin(dentry);
+ dentry->d_fsdata = NULL;
+ }
+out_si:
+ si_read_unlock(sb);
+out:
+ inode_unlock(dir);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_link_args {
+ aufs_bindex_t bdst, bsrc;
+ struct au_pin pin;
+ struct path h_path;
+ struct dentry *src_parent, *parent;
+};
+
+static int au_cpup_before_link(struct dentry *src_dentry,
+ struct au_link_args *a)
+{
+ int err;
+ struct dentry *h_src_dentry;
+ struct au_cp_generic cpg = {
+ .dentry = src_dentry,
+ .bdst = a->bdst,
+ .bsrc = a->bsrc,
+ .len = -1,
+ .pin = &a->pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */
+ };
+
+ di_read_lock_parent(a->src_parent, AuLock_IR);
+ err = au_test_and_cpup_dirs(src_dentry, a->bdst);
+ if (unlikely(err))
+ goto out;
+
+ h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
+ err = au_pin(&a->pin, src_dentry, a->bdst,
+ au_opt_udba(src_dentry->d_sb),
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out;
+
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&a->pin);
+
+out:
+ di_read_unlock(a->src_parent, AuLock_IR);
+ return err;
+}
+
+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry,
+ struct au_link_args *a)
+{
+ int err;
+ unsigned char plink;
+ aufs_bindex_t bbot;
+ struct dentry *h_src_dentry;
+ struct inode *h_inode, *inode, *delegated;
+ struct super_block *sb;
+ struct file *h_file;
+
+ plink = 0;
+ h_inode = NULL;
+ sb = src_dentry->d_sb;
+ inode = d_inode(src_dentry);
+ if (au_ibtop(inode) <= a->bdst)
+ h_inode = au_h_iptr(inode, a->bdst);
+ if (!h_inode || !h_inode->i_nlink) {
+		/* copyup src_dentry under the name of dentry. */
+ bbot = au_dbbot(dentry);
+ if (bbot < a->bsrc)
+ au_set_dbbot(dentry, a->bsrc);
+ au_set_h_dptr(dentry, a->bsrc,
+ dget(au_h_dptr(src_dentry, a->bsrc)));
+ dget(a->h_path.dentry);
+ au_set_h_dptr(dentry, a->bdst, NULL);
+ AuDbg("temporary d_inode...\n");
+ spin_lock(&dentry->d_lock);
+ dentry->d_inode = d_inode(src_dentry); /* tmp */
+ spin_unlock(&dentry->d_lock);
+ h_file = au_h_open_pre(dentry, a->bsrc, /*force_wr*/0);
+ if (IS_ERR(h_file))
+ err = PTR_ERR(h_file);
+ else {
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = a->bdst,
+ .bsrc = -1,
+ .len = -1,
+ .pin = &a->pin,
+ .flags = AuCpup_KEEPLINO
+ };
+ err = au_sio_cpup_simple(&cpg);
+ au_h_open_post(dentry, a->bsrc, h_file);
+ if (!err) {
+ dput(a->h_path.dentry);
+ a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+ } else
+ au_set_h_dptr(dentry, a->bdst,
+ a->h_path.dentry);
+ }
+ spin_lock(&dentry->d_lock);
+ dentry->d_inode = NULL; /* restore */
+ spin_unlock(&dentry->d_lock);
+ AuDbg("temporary d_inode...done\n");
+ au_set_h_dptr(dentry, a->bsrc, NULL);
+ au_set_dbbot(dentry, bbot);
+ } else {
+		/* the inode of src_dentry already exists on the a->bdst branch */
+ h_src_dentry = d_find_alias(h_inode);
+ if (!h_src_dentry && au_plink_test(inode)) {
+ plink = 1;
+ h_src_dentry = au_plink_lkup(inode, a->bdst);
+ err = PTR_ERR(h_src_dentry);
+ if (IS_ERR(h_src_dentry))
+ goto out;
+
+ if (unlikely(d_is_negative(h_src_dentry))) {
+ dput(h_src_dentry);
+ h_src_dentry = NULL;
+ }
+
+ }
+ if (h_src_dentry) {
+ delegated = NULL;
+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+ &a->h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ dput(h_src_dentry);
+ } else {
+ AuIOErr("no dentry found for hi%lu on b%d\n",
+ h_inode->i_ino, a->bdst);
+ err = -EIO;
+ }
+ }
+
+ if (!err && !plink)
+ au_plink_append(inode, a->bdst, a->h_path.dentry);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ int err, rerr;
+ struct au_dtime dt;
+ struct au_link_args *a;
+ struct dentry *wh_dentry, *h_src_dentry;
+ struct inode *inode, *delegated;
+ struct super_block *sb;
+ struct au_wr_dir_args wr_dir_args = {
+ /* .force_btgt = -1, */
+ .flags = AuWrDir_ADD_ENTRY
+ };
+
+ IMustLock(dir);
+ inode = d_inode(src_dentry);
+ IMustLock(inode);
+
+ err = -ENOMEM;
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ a->parent = dentry->d_parent; /* dir inode is locked */
+ err = aufs_read_and_write_lock2(dentry, src_dentry,
+ AuLock_NOPLM | AuLock_GEN);
+ if (unlikely(err))
+ goto out_kfree;
+ err = au_d_linkable(src_dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ err = au_d_may_add(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+
+ a->src_parent = dget_parent(src_dentry);
+ wr_dir_args.force_btgt = au_ibtop(inode);
+
+ di_write_lock_parent(a->parent);
+ wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt);
+ wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin,
+ &wr_dir_args);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ err = 0;
+ sb = dentry->d_sb;
+ a->bdst = au_dbtop(dentry);
+ a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+ a->h_path.mnt = au_sbr_mnt(sb, a->bdst);
+ a->bsrc = au_ibtop(inode);
+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+ if (!h_src_dentry && au_di(src_dentry)->di_tmpfile)
+ h_src_dentry = dget(au_hi_wh(inode, a->bsrc));
+ if (!h_src_dentry) {
+ a->bsrc = au_dbtop(src_dentry);
+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+ AuDebugOn(!h_src_dentry);
+ } else if (IS_ERR(h_src_dentry)) {
+ err = PTR_ERR(h_src_dentry);
+ goto out_parent;
+ }
+
+ /*
+ * aufs doesn't touch the credential so
+ * security_dentry_create_files_as() is unnecessary.
+ */
+ if (au_opt_test(au_mntflags(sb), PLINK)) {
+ if (a->bdst < a->bsrc
+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */)
+ err = au_cpup_or_link(src_dentry, dentry, a);
+ else {
+ delegated = NULL;
+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+ &a->h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ }
+ dput(h_src_dentry);
+ } else {
+ /*
+ * copyup src_dentry to the branch we process,
+ * and then link(2) to it.
+ */
+ dput(h_src_dentry);
+ if (a->bdst < a->bsrc
+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) {
+ au_unpin(&a->pin);
+ di_write_unlock(a->parent);
+ err = au_cpup_before_link(src_dentry, a);
+ di_write_lock_parent(a->parent);
+ if (!err)
+ err = au_pin(&a->pin, dentry, a->bdst,
+ au_opt_udba(sb),
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_wh;
+ }
+ if (!err) {
+ h_src_dentry = au_h_dptr(src_dentry, a->bdst);
+ err = -ENOENT;
+ if (h_src_dentry && d_is_positive(h_src_dentry)) {
+ delegated = NULL;
+ err = vfsub_link(h_src_dentry,
+ au_pinned_h_dir(&a->pin),
+ &a->h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry"
+ " for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ }
+ }
+ }
+ if (unlikely(err))
+ goto out_unpin;
+
+ if (wh_dentry) {
+ a->h_path.dentry = wh_dentry;
+ err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path,
+ dentry);
+ if (unlikely(err))
+ goto out_revert;
+ }
+
+ au_dir_ts(dir, a->bdst);
+ inode_inc_iversion(dir);
+ inc_nlink(inode);
+ inode->i_ctime = dir->i_ctime;
+ d_instantiate(dentry, au_igrab(inode));
+ if (d_unhashed(a->h_path.dentry))
+		/* some filesystems call d_drop() */
+ d_drop(dentry);
+	/* some filesystems consume an inode even for a hardlink */
+ au_fhsm_wrote(sb, a->bdst, /*force*/0);
+ goto out_unpin; /* success */
+
+out_revert:
+ /* no delegation since it is just created */
+ rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path,
+ /*delegated*/NULL, /*force*/0);
+ if (unlikely(rerr)) {
+ AuIOErr("%pd reverting failed(%d, %d)\n", dentry, err, rerr);
+ err = -EIO;
+ }
+ au_dtime_revert(&dt);
+out_unpin:
+ au_unpin(&a->pin);
+out_wh:
+ dput(wh_dentry);
+out_parent:
+ di_write_unlock(a->parent);
+ dput(a->src_parent);
+out_unlock:
+ if (unlikely(err)) {
+ au_update_dbtop(dentry);
+ d_drop(dentry);
+ }
+ aufs_read_and_write_unlock2(dentry, src_dentry);
+out_kfree:
+ au_kfree_rcu(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ int err, rerr;
+ aufs_bindex_t bindex;
+ unsigned char diropq;
+ struct path h_path;
+ struct dentry *wh_dentry, *parent, *opq_dentry;
+ struct inode *h_inode;
+ struct super_block *sb;
+ struct {
+ struct au_pin pin;
+ struct au_dtime dt;
+ } *a; /* reduce the stack usage */
+ struct au_wr_dir_args wr_dir_args = {
+ .force_btgt = -1,
+ .flags = AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
+ };
+
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ err = au_d_may_add(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_write_lock_parent(parent);
+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+ &a->pin, &wr_dir_args);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ sb = dentry->d_sb;
+ bindex = au_dbtop(dentry);
+ h_path.dentry = au_h_dptr(dentry, bindex);
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
+ if (unlikely(err))
+ goto out_unpin;
+
+ /* make the dir opaque */
+ diropq = 0;
+ h_inode = d_inode(h_path.dentry);
+ if (wh_dentry
+ || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ opq_dentry = au_diropq_create(dentry, bindex);
+ inode_unlock(h_inode);
+ err = PTR_ERR(opq_dentry);
+ if (IS_ERR(opq_dentry))
+ goto out_dir;
+ dput(opq_dentry);
+ diropq = 1;
+ }
+
+ err = epilog(dir, bindex, wh_dentry, dentry);
+ if (!err) {
+ inc_nlink(dir);
+ goto out_unpin; /* success */
+ }
+
+ /* revert */
+ if (diropq) {
+ AuLabel(revert opq);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ rerr = au_diropq_remove(dentry, bindex);
+ inode_unlock(h_inode);
+ if (rerr) {
+ AuIOErr("%pd reverting diropq failed(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ }
+ }
+
+out_dir:
+ AuLabel(revert dir);
+ rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
+ if (rerr) {
+ AuIOErr("%pd reverting dir failed(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ }
+ au_dtime_revert(&a->dt);
+out_unpin:
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+out_parent:
+ di_write_unlock(parent);
+out_unlock:
+ if (unlikely(err)) {
+ au_update_dbtop(dentry);
+ d_drop(dentry);
+ }
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ au_kfree_rcu(a);
+out:
+ return err;
+}
diff --git a/fs/aufs/i_op_del.c b/fs/aufs/i_op_del.c
new file mode 100644
index 000000000000..665910bd0ace
--- /dev/null
+++ b/fs/aufs/i_op_del.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations (del entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * decide whether a new whiteout for @dentry is necessary.
+ * when it is necessary, prepare the parent dir on the upper branch whose
+ * branch index is @bcpup for the creation. the actual creation of the
+ * whiteout will be done by the caller.
+ * return value:
+ * 0: wh is unnecessary
+ * plus: wh is necessary
+ * minus: error
+ */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup)
+{
+ int need_wh, err;
+ aufs_bindex_t btop;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ btop = au_dbtop(dentry);
+ if (*bcpup < 0) {
+ *bcpup = btop;
+ if (au_test_ro(sb, btop, d_inode(dentry))) {
+ err = AuWbrCopyup(au_sbi(sb), dentry);
+ *bcpup = err;
+ if (unlikely(err < 0))
+ goto out;
+ }
+ } else
+ AuDebugOn(btop < *bcpup
+ || au_test_ro(sb, *bcpup, d_inode(dentry)));
+ AuDbg("bcpup %d, btop %d\n", *bcpup, btop);
+
+ if (*bcpup != btop) {
+ err = au_cpup_dirs(dentry, *bcpup);
+ if (unlikely(err))
+ goto out;
+ need_wh = 1;
+ } else {
+ struct au_dinfo *dinfo, *tmp;
+
+ need_wh = -ENOMEM;
+ dinfo = au_di(dentry);
+ tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+ if (tmp) {
+ au_di_cp(tmp, dinfo);
+ au_di_swap(tmp, dinfo);
+ /* returns the number of positive dentries */
+ need_wh = au_lkup_dentry(dentry, btop + 1,
+ /* AuLkup_IGNORE_PERM */ 0);
+ au_di_swap(tmp, dinfo);
+ au_rw_write_unlock(&tmp->di_rwsem);
+ au_di_free(tmp);
+ }
+ }
+ AuDbg("need_wh %d\n", need_wh);
+ err = need_wh;
+
+out:
+ return err;
+}
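+
+/*
+ * illustrative sketch, added for this listing: callers map the three-way
+ * return value onto the whiteout decision, as lock_hdir_create_wh() below
+ * does:
+ *
+ *   need_wh = au_wr_dir_need_wh(dentry, isdir, &bcpup);
+ *   if (need_wh < 0)
+ *     return ERR_PTR(need_wh);   // error
+ *   if (need_wh)
+ *     wh_dentry = au_wh_create(dentry, bcpup, h_parent);
+ *   // need_wh == 0: no whiteout required
+ */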
+
+/*
+ * simple tests for the del-entry operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir)
+{
+ int err;
+ umode_t h_mode;
+ struct dentry *h_dentry, *h_latest;
+ struct inode *h_inode;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (d_really_is_positive(dentry)) {
+ err = -ENOENT;
+ if (unlikely(d_is_negative(h_dentry)))
+ goto out;
+ h_inode = d_inode(h_dentry);
+ if (unlikely(!h_inode->i_nlink))
+ goto out;
+
+ h_mode = h_inode->i_mode;
+ if (!isdir) {
+ err = -EISDIR;
+ if (unlikely(S_ISDIR(h_mode)))
+ goto out;
+ } else if (unlikely(!S_ISDIR(h_mode))) {
+ err = -ENOTDIR;
+ goto out;
+ }
+ } else {
+ /* rename(2) case */
+ err = -EIO;
+ if (unlikely(d_is_positive(h_dentry)))
+ goto out;
+ }
+
+ err = -ENOENT;
+ /* expected parent dir is locked */
+ if (unlikely(h_parent != h_dentry->d_parent))
+ goto out;
+ err = 0;
+
+ /*
+	 * rmdir-ing a dir may break the consistency on some filesystems.
+	 * let's try the heavier test.
+ */
+ err = -EACCES;
+ if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1)
+ && au_test_h_perm(d_inode(h_parent),
+ MAY_EXEC | MAY_WRITE)))
+ goto out;
+
+ h_latest = au_sio_lkup_one(&dentry->d_name, h_parent);
+ err = -EIO;
+ if (IS_ERR(h_latest))
+ goto out;
+ if (h_latest == h_dentry)
+ err = 0;
+ dput(h_latest);
+
+out:
+ return err;
+}
+
+/*
+ * decide the branch where we operate for @dentry. the branch index will be
+ * set to @rbcpup. after deciding it, 'pin' it and store the timestamps of
+ * the parent dir for reverting.
+ * when a new whiteout is necessary, create it.
+ */
+static struct dentry*
+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup,
+ struct au_dtime *dt, struct au_pin *pin)
+{
+ struct dentry *wh_dentry;
+ struct super_block *sb;
+ struct path h_path;
+ int err, need_wh;
+ unsigned int udba;
+ aufs_bindex_t bcpup;
+
+ need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup);
+ wh_dentry = ERR_PTR(need_wh);
+ if (unlikely(need_wh < 0))
+ goto out;
+
+ sb = dentry->d_sb;
+ udba = au_opt_udba(sb);
+ bcpup = *rbcpup;
+ err = au_pin(pin, dentry, bcpup, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ h_path.dentry = au_pinned_h_parent(pin);
+ if (udba != AuOpt_UDBA_NONE
+ && au_dbtop(dentry) == bcpup) {
+ err = au_may_del(dentry, bcpup, h_path.dentry, isdir);
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_unpin;
+ }
+
+ h_path.mnt = au_sbr_mnt(sb, bcpup);
+ au_dtime_store(dt, au_pinned_parent(pin), &h_path);
+ wh_dentry = NULL;
+ if (!need_wh)
+ goto out; /* success, no need to create whiteout */
+
+ wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_unpin;
+
+	/* returns with the parent locked and wh_dentry dget-ed */
+ goto out; /* success */
+
+out_unpin:
+ au_unpin(pin);
+out:
+ return wh_dentry;
+}
+
+/*
+ * when removing a dir, rename it to a unique temporary whiteout-ed name first,
+ * so that the operation is revertible and removing the many child whiteouts
+ * under the dir costs less time.
+ * returns 1 when there are too many child whiteouts and the caller should
+ * remove them asynchronously. returns 0 when the number of children is small
+ * enough to remove now, or when the branch fs is a remote fs.
+ * otherwise returns an error.
+ */
+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex,
+ struct au_nhash *whlist, struct inode *dir)
+{
+ int rmdir_later, err, dirwh;
+ struct dentry *h_dentry;
+ struct super_block *sb;
+ struct inode *inode;
+
+ sb = dentry->d_sb;
+ SiMustAnyLock(sb);
+ h_dentry = au_h_dptr(dentry, bindex);
+ err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex));
+ if (unlikely(err))
+ goto out;
+
+ /* stop monitoring */
+ inode = d_inode(dentry);
+ au_hn_free(au_hi(inode, bindex));
+
+ if (!au_test_fs_remote(h_dentry->d_sb)) {
+ dirwh = au_sbi(sb)->si_dirwh;
+ rmdir_later = (dirwh <= 1);
+ if (!rmdir_later)
+ rmdir_later = au_nhash_test_longer_wh(whlist, bindex,
+ dirwh);
+ if (rmdir_later)
+ return rmdir_later;
+ }
+
+ err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist);
+ if (unlikely(err)) {
+ AuIOErr("rmdir %pd, b%d failed, %d. ignored\n",
+ h_dentry, bindex, err);
+ err = 0;
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * final procedure for deleting an entry.
+ * maintain the dentry and iattr.
+ */
+static void epilog(struct inode *dir, struct dentry *dentry,
+ aufs_bindex_t bindex)
+{
+ struct inode *inode;
+
+ inode = d_inode(dentry);
+ d_drop(dentry);
+ inode->i_ctime = dir->i_ctime;
+
+ au_dir_ts(dir, bindex);
+ inode_inc_iversion(dir);
+}
+
+/*
+ * when an error happened, remove the created whiteout and revert everything.
+ */
+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex,
+ aufs_bindex_t bwh, struct dentry *wh_dentry,
+ struct dentry *dentry, struct au_dtime *dt)
+{
+ int rerr;
+ struct path h_path = {
+ .dentry = wh_dentry,
+ .mnt = au_sbr_mnt(dir->i_sb, bindex)
+ };
+
+ rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry);
+ if (!rerr) {
+ au_set_dbwh(dentry, bwh);
+ au_dtime_revert(dt);
+ return 0;
+ }
+
+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n", dentry, err, rerr);
+ return -EIO;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int aufs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bwh, bindex, btop;
+ struct inode *inode, *h_dir, *delegated;
+ struct dentry *parent, *wh_dentry;
+ /* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct au_pin pin;
+ struct path h_path;
+ } *a;
+
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ err = au_d_hashed_positive(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+ err = -EISDIR;
+ if (unlikely(d_is_dir(dentry)))
+ goto out_unlock; /* possible? */
+
+ btop = au_dbtop(dentry);
+ bwh = au_dbwh(dentry);
+ bindex = -1;
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_write_lock_parent(parent);
+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt,
+ &a->pin);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ a->h_path.mnt = au_sbr_mnt(dentry->d_sb, btop);
+ a->h_path.dentry = au_h_dptr(dentry, btop);
+ dget(a->h_path.dentry);
+ if (bindex == btop) {
+ h_dir = au_pinned_h_dir(&a->pin);
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, &a->h_path, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ } else {
+ /* dir inode is locked */
+ h_dir = d_inode(wh_dentry->d_parent);
+ IMustLock(h_dir);
+ err = 0;
+ }
+
+ if (!err) {
+ vfsub_drop_nlink(inode);
+ epilog(dir, dentry, bindex);
+
+ /* update target timestamps */
+ if (bindex == btop) {
+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL);
+ /*ignore*/
+ inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+ } else
+ /* todo: this timestamp may be reverted later */
+ inode->i_ctime = h_dir->i_ctime;
+ goto out_unpin; /* success */
+ }
+
+ /* revert */
+ if (wh_dentry) {
+ int rerr;
+
+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+ &a->dt);
+ if (rerr)
+ err = rerr;
+ }
+
+out_unpin:
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+ dput(a->h_path.dentry);
+out_parent:
+ di_write_unlock(parent);
+out_unlock:
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ au_kfree_rcu(a);
+out:
+ return err;
+}
+
+int aufs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ int err, rmdir_later;
+ aufs_bindex_t bwh, bindex, btop;
+ struct inode *inode;
+ struct dentry *parent, *wh_dentry, *h_dentry;
+ struct au_whtmp_rmdir *args;
+ /* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct au_pin pin;
+ } *a;
+
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ err = au_alive_dir(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+ err = -ENOTDIR;
+ if (unlikely(!d_is_dir(dentry)))
+ goto out_unlock; /* possible? */
+
+ err = -ENOMEM;
+ args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
+ if (unlikely(!args))
+ goto out_unlock;
+
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_write_lock_parent(parent);
+ err = au_test_empty(dentry, &args->whlist);
+ if (unlikely(err))
+ goto out_parent;
+
+ btop = au_dbtop(dentry);
+ bwh = au_dbwh(dentry);
+ bindex = -1;
+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt,
+ &a->pin);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ h_dentry = au_h_dptr(dentry, btop);
+ dget(h_dentry);
+ rmdir_later = 0;
+ if (bindex == btop) {
+ err = renwh_and_rmdir(dentry, btop, &args->whlist, dir);
+ if (err > 0) {
+ rmdir_later = err;
+ err = 0;
+ }
+ } else {
+ /* stop monitoring */
+ au_hn_free(au_hi(inode, btop));
+
+ /* dir inode is locked */
+ IMustLock(d_inode(wh_dentry->d_parent));
+ err = 0;
+ }
+
+ if (!err) {
+ vfsub_dead_dir(inode);
+ au_set_dbdiropq(dentry, -1);
+ epilog(dir, dentry, bindex);
+
+ if (rmdir_later) {
+ au_whtmp_kick_rmdir(dir, btop, h_dentry, args);
+ args = NULL;
+ }
+
+ goto out_unpin; /* success */
+ }
+
+ /* revert */
+ AuLabel(revert);
+ if (wh_dentry) {
+ int rerr;
+
+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+ &a->dt);
+ if (rerr)
+ err = rerr;
+ }
+
+out_unpin:
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+ dput(h_dentry);
+out_parent:
+ di_write_unlock(parent);
+ if (args)
+ au_whtmp_rmdir_free(args);
+out_unlock:
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ au_kfree_rcu(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
diff --git a/fs/aufs/i_op_ren.c b/fs/aufs/i_op_ren.c
new file mode 100644
index 000000000000..9c9966fdde32
--- /dev/null
+++ b/fs/aufs/i_op_ren.c
@@ -0,0 +1,1249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operation (rename entry)
+ * todo: this is a crazy monster
+ */
+
+#include "aufs.h"
+
+enum { AuSRC, AuDST, AuSrcDst };
+enum { AuPARENT, AuCHILD, AuParentChild };
+
+#define AuRen_ISDIR_SRC 1
+#define AuRen_ISDIR_DST (1 << 1)
+#define AuRen_ISSAMEDIR (1 << 2)
+#define AuRen_WHSRC (1 << 3)
+#define AuRen_WHDST (1 << 4)
+#define AuRen_MNT_WRITE (1 << 5)
+#define AuRen_DT_DSTDIR (1 << 6)
+#define AuRen_DIROPQ_SRC (1 << 7)
+#define AuRen_DIROPQ_DST (1 << 8)
+#define AuRen_DIRREN (1 << 9)
+#define AuRen_DROPPED_SRC (1 << 10)
+#define AuRen_DROPPED_DST (1 << 11)
+#define au_ftest_ren(flags, name) ((flags) & AuRen_##name)
+#define au_fset_ren(flags, name) \
+ do { (flags) |= AuRen_##name; } while (0)
+#define au_fclr_ren(flags, name) \
+ do { (flags) &= ~AuRen_##name; } while (0)
+
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuRen_DIRREN
+#define AuRen_DIRREN 0
+#endif
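+
+/*
+ * note added for this listing: the AuRen_* bits are handled only through the
+ * helpers above, e.g.
+ *
+ *   au_fset_ren(a->auren_flags, WHSRC);
+ *   if (au_ftest_ren(a->auren_flags, ISDIR_SRC))
+ *     ...;
+ *
+ * with CONFIG_AUFS_DIRREN disabled, AuRen_DIRREN is redefined to 0, so
+ * setting or testing it compiles down to a no-op.
+ */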
+
+struct au_ren_args {
+ struct {
+ struct dentry *dentry, *h_dentry, *parent, *h_parent,
+ *wh_dentry;
+ struct inode *dir, *inode;
+ struct au_hinode *hdir, *hinode;
+ struct au_dtime dt[AuParentChild];
+ aufs_bindex_t btop, bdiropq;
+ } sd[AuSrcDst];
+
+#define src_dentry sd[AuSRC].dentry
+#define src_dir sd[AuSRC].dir
+#define src_inode sd[AuSRC].inode
+#define src_h_dentry sd[AuSRC].h_dentry
+#define src_parent sd[AuSRC].parent
+#define src_h_parent sd[AuSRC].h_parent
+#define src_wh_dentry sd[AuSRC].wh_dentry
+#define src_hdir sd[AuSRC].hdir
+#define src_hinode sd[AuSRC].hinode
+#define src_h_dir sd[AuSRC].hdir->hi_inode
+#define src_dt sd[AuSRC].dt
+#define src_btop sd[AuSRC].btop
+#define src_bdiropq sd[AuSRC].bdiropq
+
+#define dst_dentry sd[AuDST].dentry
+#define dst_dir sd[AuDST].dir
+#define dst_inode sd[AuDST].inode
+#define dst_h_dentry sd[AuDST].h_dentry
+#define dst_parent sd[AuDST].parent
+#define dst_h_parent sd[AuDST].h_parent
+#define dst_wh_dentry sd[AuDST].wh_dentry
+#define dst_hdir sd[AuDST].hdir
+#define dst_hinode sd[AuDST].hinode
+#define dst_h_dir sd[AuDST].hdir->hi_inode
+#define dst_dt sd[AuDST].dt
+#define dst_btop sd[AuDST].btop
+#define dst_bdiropq sd[AuDST].bdiropq
+
+ struct dentry *h_trap;
+ struct au_branch *br;
+ struct path h_path;
+ struct au_nhash whlist;
+ aufs_bindex_t btgt, src_bwh;
+
+ struct {
+ unsigned short auren_flags;
+ unsigned char flags; /* syscall parameter */
+ unsigned char exchange;
+ } __packed;
+
+ struct au_whtmp_rmdir *thargs;
+ struct dentry *h_dst;
+ struct au_hinode *h_root;
+};
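+
+/*
+ * all the state for a single rename is gathered in au_ren_args: the sd[]
+ * array holds the source/destination halves, accessed through the macros
+ * above, so that aufs_rename() can allocate everything once on the heap
+ * instead of growing the stack.
+ */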
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * functions for reverting.
+ * when an error happens in a single rename system call, we should revert
+ * everything as if nothing had happened.
+ * we don't need to revert the copied-up/down parent dirs since they are
+ * harmless.
+ */
+
+#define RevertFailure(fmt, ...) do { \
+ AuIOErr("revert failure: " fmt " (%d, %d)\n", \
+ ##__VA_ARGS__, err, rerr); \
+ err = -EIO; \
+} while (0)
+
+static void au_ren_do_rev_diropq(int err, struct au_ren_args *a, int idx)
+{
+ int rerr;
+ struct dentry *d;
+#define src_or_dst(member) a->sd[idx].member
+
+ d = src_or_dst(dentry); /* {src,dst}_dentry */
+ au_hn_inode_lock_nested(src_or_dst(hinode), AuLsc_I_CHILD);
+ rerr = au_diropq_remove(d, a->btgt);
+ au_hn_inode_unlock(src_or_dst(hinode));
+ au_set_dbdiropq(d, src_or_dst(bdiropq));
+ if (rerr)
+ RevertFailure("remove diropq %pd", d);
+
+#undef src_or_dst
+}
+
+static void au_ren_rev_diropq(int err, struct au_ren_args *a)
+{
+ if (au_ftest_ren(a->auren_flags, DIROPQ_SRC))
+ au_ren_do_rev_diropq(err, a, AuSRC);
+ if (au_ftest_ren(a->auren_flags, DIROPQ_DST))
+ au_ren_do_rev_diropq(err, a, AuDST);
+}
+
+static void au_ren_rev_rename(int err, struct au_ren_args *a)
+{
+ int rerr;
+ struct inode *delegated;
+
+ a->h_path.dentry = vfsub_lkup_one(&a->src_dentry->d_name,
+ a->src_h_parent);
+ rerr = PTR_ERR(a->h_path.dentry);
+ if (IS_ERR(a->h_path.dentry)) {
+ RevertFailure("lkup one %pd", a->src_dentry);
+ return;
+ }
+
+ delegated = NULL;
+ rerr = vfsub_rename(a->dst_h_dir,
+ au_h_dptr(a->src_dentry, a->btgt),
+ a->src_h_dir, &a->h_path, &delegated, a->flags);
+ if (unlikely(rerr == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ d_drop(a->h_path.dentry);
+ dput(a->h_path.dentry);
+ /* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */
+ if (rerr)
+ RevertFailure("rename %pd", a->src_dentry);
+}
+
+static void au_ren_rev_whtmp(int err, struct au_ren_args *a)
+{
+ int rerr;
+ struct inode *delegated;
+
+ a->h_path.dentry = vfsub_lkup_one(&a->dst_dentry->d_name,
+ a->dst_h_parent);
+ rerr = PTR_ERR(a->h_path.dentry);
+ if (IS_ERR(a->h_path.dentry)) {
+ RevertFailure("lkup one %pd", a->dst_dentry);
+ return;
+ }
+ if (d_is_positive(a->h_path.dentry)) {
+ d_drop(a->h_path.dentry);
+ dput(a->h_path.dentry);
+ return;
+ }
+
+ delegated = NULL;
+ rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path,
+ &delegated, a->flags);
+ if (unlikely(rerr == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ d_drop(a->h_path.dentry);
+ dput(a->h_path.dentry);
+ if (!rerr)
+ au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst));
+ else
+ RevertFailure("rename %pd", a->h_dst);
+}
+
+static void au_ren_rev_whsrc(int err, struct au_ren_args *a)
+{
+ int rerr;
+
+ a->h_path.dentry = a->src_wh_dentry;
+ rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry);
+ au_set_dbwh(a->src_dentry, a->src_bwh);
+ if (rerr)
+ RevertFailure("unlink %pd", a->src_wh_dentry);
+}
+#undef RevertFailure
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * when we have to copyup the renaming entry, do it with the rename-target name
+ * in order to minimize the cost (the later actual rename is unnecessary).
+ * otherwise rename it on the target branch.
+ */
+static int au_ren_or_cpup(struct au_ren_args *a)
+{
+ int err;
+ struct dentry *d;
+ struct inode *delegated;
+
+ d = a->src_dentry;
+ if (au_dbtop(d) == a->btgt) {
+ a->h_path.dentry = a->dst_h_dentry;
+ AuDebugOn(au_dbtop(d) != a->btgt);
+ delegated = NULL;
+ err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt),
+ a->dst_h_dir, &a->h_path, &delegated,
+ a->flags);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ } else
+ BUG();
+
+ if (!err && a->h_dst)
+ /* it will be set to dinfo later */
+ dget(a->h_dst);
+
+ return err;
+}
+
+/* cf. aufs_rmdir() */
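+/*
+ * remove the dir which was moved aside as whtmp: do it synchronously when
+ * its whiteout list is short enough or the branch fs is remote, otherwise
+ * hand it off to the workqueue args prepared in do_rename().
+ */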
+static int au_ren_del_whtmp(struct au_ren_args *a)
+{
+ int err;
+ struct inode *dir;
+
+ dir = a->dst_dir;
+ SiMustAnyLock(dir->i_sb);
+ if (!au_nhash_test_longer_wh(&a->whlist, a->btgt,
+ au_sbi(dir->i_sb)->si_dirwh)
+ || au_test_fs_remote(a->h_dst->d_sb)) {
+ err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist);
+ if (unlikely(err))
+ pr_warn("failed removing whtmp dir %pd (%d), "
+ "ignored.\n", a->h_dst, err);
+ } else {
+ au_nhash_wh_free(&a->thargs->whlist);
+ a->thargs->whlist = a->whlist;
+ a->whlist.nh_num = 0;
+ au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs);
+ dput(a->h_dst);
+ a->thargs = NULL;
+ }
+
+ return 0;
+}
+
+/* make it an 'opaque' dir. */
+static int au_ren_do_diropq(struct au_ren_args *a, int idx)
+{
+ int err;
+ struct dentry *d, *diropq;
+#define src_or_dst(member) a->sd[idx].member
+
+ err = 0;
+ d = src_or_dst(dentry); /* {src,dst}_dentry */
+ src_or_dst(bdiropq) = au_dbdiropq(d);
+ src_or_dst(hinode) = au_hi(src_or_dst(inode), a->btgt);
+ au_hn_inode_lock_nested(src_or_dst(hinode), AuLsc_I_CHILD);
+ diropq = au_diropq_create(d, a->btgt);
+ au_hn_inode_unlock(src_or_dst(hinode));
+ if (IS_ERR(diropq))
+ err = PTR_ERR(diropq);
+ else
+ dput(diropq);
+
+#undef src_or_dst
+ return err;
+}
+
+static int au_ren_diropq(struct au_ren_args *a)
+{
+ int err;
+ unsigned char always;
+ struct dentry *d;
+
+ err = 0;
+ d = a->dst_dentry; /* already renamed on the branch */
+ always = !!au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ);
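+	/*
+	 * in short, the renamed dir needs a new diropq on the target branch
+	 * when it may still expose lower entries there: a whiteout existed
+	 * for the destination, an older diropq or lower layers sit at/below
+	 * the target branch, or ALWAYS_DIROPQ is set.
+	 */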
+ if (au_ftest_ren(a->auren_flags, ISDIR_SRC)
+ && !au_ftest_ren(a->auren_flags, DIRREN)
+ && a->btgt != au_dbdiropq(a->src_dentry)
+ && (a->dst_wh_dentry
+ || a->btgt <= au_dbdiropq(d)
+ /* hide the lower to keep xino */
+		/* the lowers may not be dirs, but we hide them anyway */
+ || a->btgt < au_dbbot(d)
+ || always)) {
+ AuDbg("here\n");
+ err = au_ren_do_diropq(a, AuSRC);
+ if (unlikely(err))
+ goto out;
+ au_fset_ren(a->auren_flags, DIROPQ_SRC);
+ }
+ if (!a->exchange)
+ goto out; /* success */
+
+ d = a->src_dentry; /* already renamed on the branch */
+ if (au_ftest_ren(a->auren_flags, ISDIR_DST)
+ && a->btgt != au_dbdiropq(a->dst_dentry)
+ && (a->btgt < au_dbdiropq(d)
+ || a->btgt < au_dbbot(d)
+ || always)) {
+ AuDbgDentry(a->src_dentry);
+ AuDbgDentry(a->dst_dentry);
+ err = au_ren_do_diropq(a, AuDST);
+ if (unlikely(err))
+ goto out_rev_src;
+ au_fset_ren(a->auren_flags, DIROPQ_DST);
+ }
+ goto out; /* success */
+
+out_rev_src:
+ AuDbg("err %d, reverting src\n", err);
+ au_ren_rev_diropq(err, a);
+out:
+ return err;
+}
+
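+/*
+ * the core of the rename: create/lookup the whiteouts, move an existing
+ * destination dir aside as whtmp, rename on the lower branch, make the
+ * result opaque when needed, and revert every completed step on failure.
+ */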
+static int do_rename(struct au_ren_args *a)
+{
+ int err;
+ struct dentry *d, *h_d;
+
+ if (!a->exchange) {
+ /* prepare workqueue args for asynchronous rmdir */
+ h_d = a->dst_h_dentry;
+ if (au_ftest_ren(a->auren_flags, ISDIR_DST)
+ /* && !au_ftest_ren(a->auren_flags, DIRREN) */
+ && d_is_positive(h_d)) {
+ err = -ENOMEM;
+ a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb,
+ GFP_NOFS);
+ if (unlikely(!a->thargs))
+ goto out;
+ a->h_dst = dget(h_d);
+ }
+
+ /* create whiteout for src_dentry */
+ if (au_ftest_ren(a->auren_flags, WHSRC)) {
+ a->src_bwh = au_dbwh(a->src_dentry);
+ AuDebugOn(a->src_bwh >= 0);
+ a->src_wh_dentry = au_wh_create(a->src_dentry, a->btgt,
+ a->src_h_parent);
+ err = PTR_ERR(a->src_wh_dentry);
+ if (IS_ERR(a->src_wh_dentry))
+ goto out_thargs;
+ }
+
+ /* lookup whiteout for dentry */
+ if (au_ftest_ren(a->auren_flags, WHDST)) {
+ h_d = au_wh_lkup(a->dst_h_parent,
+ &a->dst_dentry->d_name, a->br);
+ err = PTR_ERR(h_d);
+ if (IS_ERR(h_d))
+ goto out_whsrc;
+ if (d_is_negative(h_d))
+ dput(h_d);
+ else
+ a->dst_wh_dentry = h_d;
+ }
+
+ /* rename dentry to tmpwh */
+ if (a->thargs) {
+ err = au_whtmp_ren(a->dst_h_dentry, a->br);
+ if (unlikely(err))
+ goto out_whdst;
+
+ d = a->dst_dentry;
+ au_set_h_dptr(d, a->btgt, NULL);
+ err = au_lkup_neg(d, a->btgt, /*wh*/0);
+ if (unlikely(err))
+ goto out_whtmp;
+ a->dst_h_dentry = au_h_dptr(d, a->btgt);
+ }
+ }
+
+ BUG_ON(d_is_positive(a->dst_h_dentry) && a->src_btop != a->btgt);
+#if 0
+ BUG_ON(!au_ftest_ren(a->auren_flags, DIRREN)
+ && d_is_positive(a->dst_h_dentry)
+ && a->src_btop != a->btgt);
+#endif
+
+ /* rename by vfs_rename or cpup */
+ err = au_ren_or_cpup(a);
+ if (unlikely(err))
+ /* leave the copied-up one */
+ goto out_whtmp;
+
+ /* make dir opaque */
+ err = au_ren_diropq(a);
+ if (unlikely(err))
+ goto out_rename;
+
+ /* update target timestamps */
+ if (a->exchange) {
+ AuDebugOn(au_dbtop(a->dst_dentry) != a->btgt);
+ a->h_path.dentry = au_h_dptr(a->dst_dentry, a->btgt);
+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
+ a->dst_inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+ }
+ AuDebugOn(au_dbtop(a->src_dentry) != a->btgt);
+ a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt);
+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
+ a->src_inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+
+ if (!a->exchange) {
+ /* remove whiteout for dentry */
+ if (a->dst_wh_dentry) {
+ a->h_path.dentry = a->dst_wh_dentry;
+ err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path,
+ a->dst_dentry);
+ if (unlikely(err))
+ goto out_diropq;
+ }
+
+ /* remove whtmp */
+ if (a->thargs)
+ au_ren_del_whtmp(a); /* ignore this error */
+
+ au_fhsm_wrote(a->src_dentry->d_sb, a->btgt, /*force*/0);
+ }
+ err = 0;
+ goto out_success;
+
+out_diropq:
+ au_ren_rev_diropq(err, a);
+out_rename:
+ au_ren_rev_rename(err, a);
+ dput(a->h_dst);
+out_whtmp:
+ if (a->thargs)
+ au_ren_rev_whtmp(err, a);
+out_whdst:
+ dput(a->dst_wh_dentry);
+ a->dst_wh_dentry = NULL;
+out_whsrc:
+ if (a->src_wh_dentry)
+ au_ren_rev_whsrc(err, a);
+out_success:
+ dput(a->src_wh_dentry);
+ dput(a->dst_wh_dentry);
+out_thargs:
+ if (a->thargs) {
+ dput(a->h_dst);
+ au_whtmp_rmdir_free(a->thargs);
+ a->thargs = NULL;
+ }
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test if the @dentry dir can be a rename destination or not.
+ * success means it is a logically empty dir.
+ */
+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist)
+{
+ return au_test_empty(dentry, whlist);
+}
+
+/*
+ * test if the @a->src_dentry dir can be a rename source or not.
+ * if it can, return 0.
+ * success means either
+ * - it is a logically empty dir, or
+ * - it exists on the writable branch and has no children, including
+ *   whiteouts, on the lower branches, unless DIRREN is enabled.
+ */
+static int may_rename_srcdir(struct au_ren_args *a)
+{
+ int err;
+ unsigned int rdhash;
+ aufs_bindex_t btop, btgt;
+ struct dentry *dentry;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+ dentry = a->src_dentry;
+ sb = dentry->d_sb;
+ sbinfo = au_sbi(sb);
+ if (au_opt_test(sbinfo->si_mntflags, DIRREN))
+ au_fset_ren(a->auren_flags, DIRREN);
+
+ btgt = a->btgt;
+ btop = au_dbtop(dentry);
+ if (btop != btgt) {
+ struct au_nhash whlist;
+
+ SiMustAnyLock(sb);
+ rdhash = sbinfo->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL,
+ dentry));
+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_test_empty(dentry, &whlist);
+ au_nhash_wh_free(&whlist);
+ goto out;
+ }
+
+ if (btop == au_dbtaildir(dentry))
+ return 0; /* success */
+
+ err = au_test_empty_lower(dentry);
+
+out:
+ if (err == -ENOTEMPTY) {
+ if (au_ftest_ren(a->auren_flags, DIRREN)) {
+ err = 0;
+ } else {
+			AuWarn1("renaming a dir which has child(ren) on "
+				"multiple branches is not supported\n");
+ err = -EXDEV;
+ }
+ }
+ return err;
+}
+
+/* side effect: sets whlist and h_dentry */
+static int au_ren_may_dir(struct au_ren_args *a)
+{
+ int err;
+ unsigned int rdhash;
+ struct dentry *d;
+
+ d = a->dst_dentry;
+ SiMustAnyLock(d->d_sb);
+
+ err = 0;
+ if (au_ftest_ren(a->auren_flags, ISDIR_DST) && a->dst_inode) {
+ rdhash = au_sbi(d->d_sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d));
+ err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+
+ if (!a->exchange) {
+ au_set_dbtop(d, a->dst_btop);
+ err = may_rename_dstdir(d, &a->whlist);
+ au_set_dbtop(d, a->btgt);
+ } else
+ err = may_rename_srcdir(a);
+ }
+ a->dst_h_dentry = au_h_dptr(d, au_dbtop(d));
+ if (unlikely(err))
+ goto out;
+
+ d = a->src_dentry;
+ a->src_h_dentry = au_h_dptr(d, au_dbtop(d));
+ if (au_ftest_ren(a->auren_flags, ISDIR_SRC)) {
+ err = may_rename_srcdir(a);
+ if (unlikely(err)) {
+ au_nhash_wh_free(&a->whlist);
+ a->whlist.nh_num = 0;
+ }
+ }
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * simple tests for rename.
+ * following the checks in the VFS, plus the parent-child relationship.
+ */
+static int au_may_ren(struct au_ren_args *a)
+{
+ int err, isdir;
+ struct inode *h_inode;
+
+ if (a->src_btop == a->btgt) {
+ err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent,
+ au_ftest_ren(a->auren_flags, ISDIR_SRC));
+ if (unlikely(err))
+ goto out;
+ err = -EINVAL;
+ if (unlikely(a->src_h_dentry == a->h_trap))
+ goto out;
+ }
+
+ err = 0;
+ if (a->dst_btop != a->btgt)
+ goto out;
+
+ err = -ENOTEMPTY;
+ if (unlikely(a->dst_h_dentry == a->h_trap))
+ goto out;
+
+ err = -EIO;
+ isdir = !!au_ftest_ren(a->auren_flags, ISDIR_DST);
+ if (d_really_is_negative(a->dst_dentry)) {
+ if (d_is_negative(a->dst_h_dentry))
+ err = au_may_add(a->dst_dentry, a->btgt,
+ a->dst_h_parent, isdir);
+ } else {
+ if (unlikely(d_is_negative(a->dst_h_dentry)))
+ goto out;
+ h_inode = d_inode(a->dst_h_dentry);
+ if (h_inode->i_nlink)
+ err = au_may_del(a->dst_dentry, a->btgt,
+ a->dst_h_parent, isdir);
+ }
+
+out:
+ if (unlikely(err == -ENOENT || err == -EEXIST))
+ err = -EIO;
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * locking order
+ * (VFS)
+ * - src_dir and dir by lock_rename()
+ * - the inode, if it exists
+ * (aufs)
+ * - lock all
+ * + src_dentry and dentry by aufs_read_and_write_lock2() which calls,
+ * + si_read_lock
+ * + di_write_lock2_child()
+ * + di_write_lock_child()
+ * + ii_write_lock_child()
+ * + di_write_lock_child2()
+ * + ii_write_lock_child2()
+ * + src_parent and parent
+ * + di_write_lock_parent()
+ * + ii_write_lock_parent()
+ * + di_write_lock_parent2()
+ * + ii_write_lock_parent2()
+ * + lower src_dir and dir by vfsub_lock_rename()
+ *	 + verify every relationship between child and parent. if any
+ *	   of them fails, unlock all and return -EBUSY.
+ */
+static void au_ren_unlock(struct au_ren_args *a)
+{
+ vfsub_unlock_rename(a->src_h_parent, a->src_hdir,
+ a->dst_h_parent, a->dst_hdir);
+ if (au_ftest_ren(a->auren_flags, DIRREN)
+ && a->h_root)
+ au_hn_inode_unlock(a->h_root);
+ if (au_ftest_ren(a->auren_flags, MNT_WRITE))
+ vfsub_mnt_drop_write(au_br_mnt(a->br));
+}
+
+static int au_ren_lock(struct au_ren_args *a)
+{
+ int err;
+ unsigned int udba;
+
+ err = 0;
+ a->src_h_parent = au_h_dptr(a->src_parent, a->btgt);
+ a->src_hdir = au_hi(a->src_dir, a->btgt);
+ a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt);
+ a->dst_hdir = au_hi(a->dst_dir, a->btgt);
+
+ err = vfsub_mnt_want_write(au_br_mnt(a->br));
+ if (unlikely(err))
+ goto out;
+ au_fset_ren(a->auren_flags, MNT_WRITE);
+ if (au_ftest_ren(a->auren_flags, DIRREN)) {
+ struct dentry *root;
+ struct inode *dir;
+
+ /*
+ * sbinfo is already locked, so this ii_read_lock is
+ * unnecessary. but our debugging feature checks it.
+ */
+ root = a->src_inode->i_sb->s_root;
+ if (root != a->src_parent && root != a->dst_parent) {
+ dir = d_inode(root);
+ ii_read_lock_parent3(dir);
+ a->h_root = au_hi(dir, a->btgt);
+ ii_read_unlock(dir);
+ au_hn_inode_lock_nested(a->h_root, AuLsc_I_PARENT3);
+ }
+ }
+ a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir,
+ a->dst_h_parent, a->dst_hdir);
+ udba = au_opt_udba(a->src_dentry->d_sb);
+ if (unlikely(a->src_hdir->hi_inode != d_inode(a->src_h_parent)
+ || a->dst_hdir->hi_inode != d_inode(a->dst_h_parent)))
+ err = au_busy_or_stale();
+ if (!err && au_dbtop(a->src_dentry) == a->btgt)
+ err = au_h_verify(a->src_h_dentry, udba,
+ d_inode(a->src_h_parent), a->src_h_parent,
+ a->br);
+ if (!err && au_dbtop(a->dst_dentry) == a->btgt)
+ err = au_h_verify(a->dst_h_dentry, udba,
+ d_inode(a->dst_h_parent), a->dst_h_parent,
+ a->br);
+ if (!err)
+ goto out; /* success */
+
+ err = au_busy_or_stale();
+ au_ren_unlock(a);
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_ren_refresh_dir(struct au_ren_args *a)
+{
+ struct inode *dir;
+
+ dir = a->dst_dir;
+ inode_inc_iversion(dir);
+ if (au_ftest_ren(a->auren_flags, ISDIR_SRC)) {
+		/* is this update defined in POSIX? */
+ au_cpup_attr_timesizes(a->src_inode);
+ au_cpup_attr_nlink(dir, /*force*/1);
+ }
+ au_dir_ts(dir, a->btgt);
+
+ if (a->exchange) {
+ dir = a->src_dir;
+ inode_inc_iversion(dir);
+ if (au_ftest_ren(a->auren_flags, ISDIR_DST)) {
+			/* is this update defined in POSIX? */
+ au_cpup_attr_timesizes(a->dst_inode);
+ au_cpup_attr_nlink(dir, /*force*/1);
+ }
+ au_dir_ts(dir, a->btgt);
+ }
+
+ if (au_ftest_ren(a->auren_flags, ISSAMEDIR))
+ return;
+
+ dir = a->src_dir;
+ inode_inc_iversion(dir);
+ if (au_ftest_ren(a->auren_flags, ISDIR_SRC))
+ au_cpup_attr_nlink(dir, /*force*/1);
+ au_dir_ts(dir, a->btgt);
+}
+
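+/*
+ * refresh the renamed dentries/inodes themselves: attach the new lower
+ * dentry on the target branch, drop the stale lower dentries/inodes outside
+ * the new range, and reset the whiteout index.
+ */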
+static void au_ren_refresh(struct au_ren_args *a)
+{
+ aufs_bindex_t bbot, bindex;
+ struct dentry *d, *h_d;
+ struct inode *i, *h_i;
+ struct super_block *sb;
+
+ d = a->dst_dentry;
+ d_drop(d);
+ if (a->h_dst)
+ /* already dget-ed by au_ren_or_cpup() */
+ au_set_h_dptr(d, a->btgt, a->h_dst);
+
+ i = a->dst_inode;
+ if (i) {
+ if (!a->exchange) {
+ if (!au_ftest_ren(a->auren_flags, ISDIR_DST))
+ vfsub_drop_nlink(i);
+ else {
+ vfsub_dead_dir(i);
+ au_cpup_attr_timesizes(i);
+ }
+ au_update_dbrange(d, /*do_put_zero*/1);
+ } else
+ au_cpup_attr_nlink(i, /*force*/1);
+ } else {
+ bbot = a->btgt;
+ for (bindex = au_dbtop(d); bindex < bbot; bindex++)
+ au_set_h_dptr(d, bindex, NULL);
+ bbot = au_dbbot(d);
+ for (bindex = a->btgt + 1; bindex <= bbot; bindex++)
+ au_set_h_dptr(d, bindex, NULL);
+ au_update_dbrange(d, /*do_put_zero*/0);
+ }
+
+ if (a->exchange
+ || au_ftest_ren(a->auren_flags, DIRREN)) {
+ d_drop(a->src_dentry);
+ if (au_ftest_ren(a->auren_flags, DIRREN))
+ au_set_dbwh(a->src_dentry, -1);
+ return;
+ }
+
+ d = a->src_dentry;
+ au_set_dbwh(d, -1);
+ bbot = au_dbbot(d);
+ for (bindex = a->btgt + 1; bindex <= bbot; bindex++) {
+ h_d = au_h_dptr(d, bindex);
+ if (h_d)
+ au_set_h_dptr(d, bindex, NULL);
+ }
+ au_set_dbbot(d, a->btgt);
+
+ sb = d->d_sb;
+ i = a->src_inode;
+ if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i))
+ return; /* success */
+
+ bbot = au_ibbot(i);
+ for (bindex = a->btgt + 1; bindex <= bbot; bindex++) {
+ h_i = au_h_iptr(i, bindex);
+ if (h_i) {
+ au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0);
+ /* ignore this error */
+ au_set_h_iptr(i, bindex, NULL, 0);
+ }
+ }
+ au_set_ibbot(i, a->btgt);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* mainly for link(2) and rename(2) */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt)
+{
+ aufs_bindex_t bdiropq, bwh;
+ struct dentry *parent;
+ struct au_branch *br;
+
+ parent = dentry->d_parent;
+ IMustLock(d_inode(parent)); /* dir is locked */
+
+ bdiropq = au_dbdiropq(parent);
+ bwh = au_dbwh(dentry);
+ br = au_sbr(dentry->d_sb, btgt);
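+	/*
+	 * the target branch is unusable when it is read-only, or when a
+	 * diropq on the parent or a whiteout for the dentry already exists
+	 * on an upper (smaller index) branch; signal that by returning -1.
+	 */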
+ if (au_br_rdonly(br)
+ || (0 <= bdiropq && bdiropq < btgt)
+ || (0 <= bwh && bwh < btgt))
+ btgt = -1;
+
+ AuDbg("btgt %d\n", btgt);
+ return btgt;
+}
+
+/* sets src_btop, dst_btop and btgt */
+static int au_ren_wbr(struct au_ren_args *a)
+{
+ int err;
+ struct au_wr_dir_args wr_dir_args = {
+ /* .force_btgt = -1, */
+ .flags = AuWrDir_ADD_ENTRY
+ };
+
+ a->src_btop = au_dbtop(a->src_dentry);
+ a->dst_btop = au_dbtop(a->dst_dentry);
+ if (au_ftest_ren(a->auren_flags, ISDIR_SRC)
+ || au_ftest_ren(a->auren_flags, ISDIR_DST))
+ au_fset_wrdir(wr_dir_args.flags, ISDIR);
+ wr_dir_args.force_btgt = a->src_btop;
+ if (a->dst_inode && a->dst_btop < a->src_btop)
+ wr_dir_args.force_btgt = a->dst_btop;
+ wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
+ err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
+ a->btgt = err;
+ if (a->exchange)
+ au_update_dbtop(a->dst_dentry);
+
+ return err;
+}
+
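+/*
+ * save the timestamps of the parent dirs (and, for dirs or an exchange, the
+ * children too) so that au_ren_rev_dt() can restore them when the rename is
+ * reverted.
+ */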
+static void au_ren_dt(struct au_ren_args *a)
+{
+ a->h_path.dentry = a->src_h_parent;
+ au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path);
+ if (!au_ftest_ren(a->auren_flags, ISSAMEDIR)) {
+ a->h_path.dentry = a->dst_h_parent;
+ au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path);
+ }
+
+ au_fclr_ren(a->auren_flags, DT_DSTDIR);
+ if (!au_ftest_ren(a->auren_flags, ISDIR_SRC)
+ && !a->exchange)
+ return;
+
+ a->h_path.dentry = a->src_h_dentry;
+ au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path);
+ if (d_is_positive(a->dst_h_dentry)) {
+ au_fset_ren(a->auren_flags, DT_DSTDIR);
+ a->h_path.dentry = a->dst_h_dentry;
+ au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path);
+ }
+}
+
+static void au_ren_rev_dt(int err, struct au_ren_args *a)
+{
+ struct dentry *h_d;
+ struct inode *h_inode;
+
+ au_dtime_revert(a->src_dt + AuPARENT);
+ if (!au_ftest_ren(a->auren_flags, ISSAMEDIR))
+ au_dtime_revert(a->dst_dt + AuPARENT);
+
+ if (au_ftest_ren(a->auren_flags, ISDIR_SRC) && err != -EIO) {
+ h_d = a->src_dt[AuCHILD].dt_h_path.dentry;
+ h_inode = d_inode(h_d);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ au_dtime_revert(a->src_dt + AuCHILD);
+ inode_unlock(h_inode);
+
+ if (au_ftest_ren(a->auren_flags, DT_DSTDIR)) {
+ h_d = a->dst_dt[AuCHILD].dt_h_path.dentry;
+ h_inode = d_inode(h_d);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ au_dtime_revert(a->dst_dt + AuCHILD);
+ inode_unlock(h_inode);
+ }
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry,
+ struct inode *_dst_dir, struct dentry *_dst_dentry,
+ unsigned int _flags)
+{
+ int err, lock_flags;
+ void *rev;
+ /* reduce stack space */
+ struct au_ren_args *a;
+ struct au_pin pin;
+
+ AuDbg("%pd, %pd, 0x%x\n", _src_dentry, _dst_dentry, _flags);
+ IMustLock(_src_dir);
+ IMustLock(_dst_dir);
+
+ err = -EINVAL;
+ if (unlikely(_flags & RENAME_WHITEOUT))
+ goto out;
+
+ err = -ENOMEM;
+ BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE);
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ a->flags = _flags;
+ BUILD_BUG_ON(sizeof(a->exchange) == sizeof(u8)
+ && RENAME_EXCHANGE > U8_MAX);
+ a->exchange = _flags & RENAME_EXCHANGE;
+ a->src_dir = _src_dir;
+ a->src_dentry = _src_dentry;
+ a->src_inode = NULL;
+ if (d_really_is_positive(a->src_dentry))
+ a->src_inode = d_inode(a->src_dentry);
+ a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */
+ a->dst_dir = _dst_dir;
+ a->dst_dentry = _dst_dentry;
+ a->dst_inode = NULL;
+ if (d_really_is_positive(a->dst_dentry))
+ a->dst_inode = d_inode(a->dst_dentry);
+ a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */
+ if (a->dst_inode) {
+ /*
+ * if EXCHANGE && src is non-dir && dst is dir,
+ * dst is not locked.
+ */
+ /* IMustLock(a->dst_inode); */
+ au_igrab(a->dst_inode);
+ }
+
+ err = -ENOTDIR;
+ lock_flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN;
+ if (d_is_dir(a->src_dentry)) {
+ au_fset_ren(a->auren_flags, ISDIR_SRC);
+ if (unlikely(!a->exchange
+ && d_really_is_positive(a->dst_dentry)
+ && !d_is_dir(a->dst_dentry)))
+ goto out_free;
+ lock_flags |= AuLock_DIRS;
+ }
+ if (a->dst_inode && d_is_dir(a->dst_dentry)) {
+ au_fset_ren(a->auren_flags, ISDIR_DST);
+ if (unlikely(!a->exchange
+ && d_really_is_positive(a->src_dentry)
+ && !d_is_dir(a->src_dentry)))
+ goto out_free;
+ lock_flags |= AuLock_DIRS;
+ }
+ err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
+ lock_flags);
+ if (unlikely(err))
+ goto out_free;
+
+ err = au_d_hashed_positive(a->src_dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ err = -ENOENT;
+ if (a->dst_inode) {
+ /*
+		 * If it is a dir, the VFS unhashes it before calling this
+		 * function, which means we cannot rely upon d_unhashed().
+ */
+ if (unlikely(!a->dst_inode->i_nlink))
+ goto out_unlock;
+ if (!au_ftest_ren(a->auren_flags, ISDIR_DST)) {
+ err = au_d_hashed_positive(a->dst_dentry);
+ if (unlikely(err && !a->exchange))
+ goto out_unlock;
+ } else if (unlikely(IS_DEADDIR(a->dst_inode)))
+ goto out_unlock;
+ } else if (unlikely(d_unhashed(a->dst_dentry)))
+ goto out_unlock;
+
+ /*
+ * is it possible?
+ * yes, it happened (in linux-3.3-rcN) but I don't know why.
+ * there may exist a problem somewhere else.
+ */
+ err = -EINVAL;
+ if (unlikely(d_inode(a->dst_parent) == d_inode(a->src_dentry)))
+ goto out_unlock;
+
+ au_fset_ren(a->auren_flags, ISSAMEDIR); /* temporary */
+ di_write_lock_parent(a->dst_parent);
+
+ /* which branch we process */
+ err = au_ren_wbr(a);
+ if (unlikely(err < 0))
+ goto out_parent;
+ a->br = au_sbr(a->dst_dentry->d_sb, a->btgt);
+ a->h_path.mnt = au_br_mnt(a->br);
+
+ /* are they available to be renamed */
+ err = au_ren_may_dir(a);
+ if (unlikely(err))
+ goto out_children;
+
+ /* prepare the writable parent dir on the same branch */
+ if (a->dst_btop == a->btgt) {
+ au_fset_ren(a->auren_flags, WHDST);
+ } else {
+ err = au_cpup_dirs(a->dst_dentry, a->btgt);
+ if (unlikely(err))
+ goto out_children;
+ }
+
+ err = 0;
+ if (!a->exchange) {
+ if (a->src_dir != a->dst_dir) {
+ /*
+ * this temporary unlock is safe,
+			 * because both parent dirs' ->i_mutex are held.
+ */
+ di_write_unlock(a->dst_parent);
+ di_write_lock_parent(a->src_parent);
+ err = au_wr_dir_need_wh(a->src_dentry,
+ au_ftest_ren(a->auren_flags,
+ ISDIR_SRC),
+ &a->btgt);
+ di_write_unlock(a->src_parent);
+ di_write_lock2_parent(a->src_parent, a->dst_parent,
+ /*isdir*/1);
+ au_fclr_ren(a->auren_flags, ISSAMEDIR);
+ } else
+ err = au_wr_dir_need_wh(a->src_dentry,
+ au_ftest_ren(a->auren_flags,
+ ISDIR_SRC),
+ &a->btgt);
+ }
+ if (unlikely(err < 0))
+ goto out_children;
+ if (err)
+ au_fset_ren(a->auren_flags, WHSRC);
+
+ /* cpup src */
+ if (a->src_btop != a->btgt) {
+ err = au_pin(&pin, a->src_dentry, a->btgt,
+ au_opt_udba(a->src_dentry->d_sb),
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (!err) {
+ struct au_cp_generic cpg = {
+ .dentry = a->src_dentry,
+ .bdst = a->btgt,
+ .bsrc = a->src_btop,
+ .len = -1,
+ .pin = &pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+ AuDebugOn(au_dbtop(a->src_dentry) != a->src_btop);
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&pin);
+ }
+ if (unlikely(err))
+ goto out_children;
+ a->src_btop = a->btgt;
+ a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt);
+ if (!a->exchange)
+ au_fset_ren(a->auren_flags, WHSRC);
+ }
+
+ /* cpup dst */
+ if (a->exchange && a->dst_inode
+ && a->dst_btop != a->btgt) {
+ err = au_pin(&pin, a->dst_dentry, a->btgt,
+ au_opt_udba(a->dst_dentry->d_sb),
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (!err) {
+ struct au_cp_generic cpg = {
+ .dentry = a->dst_dentry,
+ .bdst = a->btgt,
+ .bsrc = a->dst_btop,
+ .len = -1,
+ .pin = &pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&pin);
+ }
+ if (unlikely(err))
+ goto out_children;
+ a->dst_btop = a->btgt;
+ a->dst_h_dentry = au_h_dptr(a->dst_dentry, a->btgt);
+ }
+
+ /* lock them all */
+ err = au_ren_lock(a);
+ if (unlikely(err))
+ /* leave the copied-up one */
+ goto out_children;
+
+ if (!a->exchange) {
+ if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE))
+ err = au_may_ren(a);
+ else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN))
+ err = -ENAMETOOLONG;
+ if (unlikely(err))
+ goto out_hdir;
+ }
+
+ /* store timestamps to be revertible */
+ au_ren_dt(a);
+
+ /* store dirren info */
+ if (au_ftest_ren(a->auren_flags, DIRREN)) {
+ err = au_dr_rename(a->src_dentry, a->btgt,
+ &a->dst_dentry->d_name, &rev);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out_dt;
+ }
+
+ /* here we go */
+ err = do_rename(a);
+ if (unlikely(err))
+ goto out_dirren;
+
+ if (au_ftest_ren(a->auren_flags, DIRREN))
+ au_dr_rename_fin(a->src_dentry, a->btgt, rev);
+
+ /* update dir attributes */
+ au_ren_refresh_dir(a);
+
+ /* dput/iput all lower dentries */
+ au_ren_refresh(a);
+
+ goto out_hdir; /* success */
+
+out_dirren:
+ if (au_ftest_ren(a->auren_flags, DIRREN))
+ au_dr_rename_rev(a->src_dentry, a->btgt, rev);
+out_dt:
+ au_ren_rev_dt(err, a);
+out_hdir:
+ au_ren_unlock(a);
+out_children:
+ au_nhash_wh_free(&a->whlist);
+ if (err && a->dst_inode && a->dst_btop != a->btgt) {
+ AuDbg("btop %d, btgt %d\n", a->dst_btop, a->btgt);
+ au_set_h_dptr(a->dst_dentry, a->btgt, NULL);
+ au_set_dbtop(a->dst_dentry, a->dst_btop);
+ }
+out_parent:
+ if (!err) {
+ if (d_unhashed(a->src_dentry))
+ au_fset_ren(a->auren_flags, DROPPED_SRC);
+ if (d_unhashed(a->dst_dentry))
+ au_fset_ren(a->auren_flags, DROPPED_DST);
+ if (!a->exchange)
+ d_move(a->src_dentry, a->dst_dentry);
+ else {
+ d_exchange(a->src_dentry, a->dst_dentry);
+ if (au_ftest_ren(a->auren_flags, DROPPED_DST))
+ d_drop(a->dst_dentry);
+ }
+ if (au_ftest_ren(a->auren_flags, DROPPED_SRC))
+ d_drop(a->src_dentry);
+ } else {
+ au_update_dbtop(a->dst_dentry);
+ if (!a->dst_inode)
+ d_drop(a->dst_dentry);
+ }
+ if (au_ftest_ren(a->auren_flags, ISSAMEDIR))
+ di_write_unlock(a->dst_parent);
+ else
+ di_write_unlock2(a->src_parent, a->dst_parent);
+out_unlock:
+ aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry);
+out_free:
+ iput(a->dst_inode);
+ if (a->thargs)
+ au_whtmp_rmdir_free(a->thargs);
+ au_kfree_rcu(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c
new file mode 100644
index 000000000000..098fd115052a
--- /dev/null
+++ b/fs/aufs/iinfo.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode private data
+ */
+
+#include "aufs.h"
+
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex)
+{
+ struct inode *h_inode;
+ struct au_hinode *hinode;
+
+ IiMustAnyLock(inode);
+
+ hinode = au_hinode(au_ii(inode), bindex);
+ h_inode = hinode->hi_inode;
+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+ return h_inode;
+}
+
+/* todo: hard/soft set? */
+void au_hiput(struct au_hinode *hinode)
+{
+ au_hn_free(hinode);
+ dput(hinode->hi_whdentry);
+ iput(hinode->hi_inode);
+}
+
+unsigned int au_hi_flags(struct inode *inode, int isdir)
+{
+ unsigned int flags;
+ const unsigned int mnt_flags = au_mntflags(inode->i_sb);
+
+ flags = 0;
+ if (au_opt_test(mnt_flags, XINO))
+ au_fset_hi(flags, XINO);
+ if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY))
+ au_fset_hi(flags, HNOTIFY);
+ return flags;
+}
+
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode, unsigned int flags)
+{
+ struct au_hinode *hinode;
+ struct inode *hi;
+ struct au_iinfo *iinfo = au_ii(inode);
+
+ IiMustWriteLock(inode);
+
+ hinode = au_hinode(iinfo, bindex);
+ hi = hinode->hi_inode;
+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+
+ if (hi)
+ au_hiput(hinode);
+ hinode->hi_inode = h_inode;
+ if (h_inode) {
+ int err;
+ struct super_block *sb = inode->i_sb;
+ struct au_branch *br;
+
+ AuDebugOn(inode->i_mode
+ && (h_inode->i_mode & S_IFMT)
+ != (inode->i_mode & S_IFMT));
+ if (bindex == iinfo->ii_btop)
+ au_cpup_igen(inode, h_inode);
+ br = au_sbr(sb, bindex);
+ hinode->hi_id = br->br_id;
+ if (au_ftest_hi(flags, XINO)) {
+ err = au_xino_write(sb, bindex, h_inode->i_ino,
+ inode->i_ino);
+ if (unlikely(err))
+ AuIOErr1("failed au_xino_write() %d\n", err);
+ }
+
+ if (au_ftest_hi(flags, HNOTIFY)
+ && au_br_hnotifyable(br->br_perm)) {
+ err = au_hn_alloc(hinode, inode);
+ if (unlikely(err))
+ AuIOErr1("au_hn_alloc() %d\n", err);
+ }
+ }
+}
+
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_wh)
+{
+ struct au_hinode *hinode;
+
+ IiMustWriteLock(inode);
+
+ hinode = au_hinode(au_ii(inode), bindex);
+ AuDebugOn(hinode->hi_whdentry);
+ hinode->hi_whdentry = h_wh;
+}
+
+void au_update_iigen(struct inode *inode, int half)
+{
+ struct au_iinfo *iinfo;
+ struct au_iigen *iigen;
+ unsigned int sigen;
+
+ sigen = au_sigen(inode->i_sb);
+ iinfo = au_ii(inode);
+ iigen = &iinfo->ii_generation;
+ spin_lock(&iigen->ig_spin);
+ iigen->ig_generation = sigen;
+ if (half)
+ au_ig_fset(iigen->ig_flags, HALF_REFRESHED);
+ else
+ au_ig_fclr(iigen->ig_flags, HALF_REFRESHED);
+ spin_unlock(&iigen->ig_spin);
+}
+
+/* it may be called at remount time, too */
+void au_update_ibrange(struct inode *inode, int do_put_zero)
+{
+ struct au_iinfo *iinfo;
+ aufs_bindex_t bindex, bbot;
+
+ AuDebugOn(au_is_bad_inode(inode));
+ IiMustWriteLock(inode);
+
+ iinfo = au_ii(inode);
+ if (do_put_zero && iinfo->ii_btop >= 0) {
+ for (bindex = iinfo->ii_btop; bindex <= iinfo->ii_bbot;
+ bindex++) {
+ struct inode *h_i;
+
+ h_i = au_hinode(iinfo, bindex)->hi_inode;
+ if (h_i
+ && !h_i->i_nlink
+ && !(h_i->i_state & I_LINKABLE))
+ au_set_h_iptr(inode, bindex, NULL, 0);
+ }
+ }
+
+ iinfo->ii_btop = -1;
+ iinfo->ii_bbot = -1;
+ bbot = au_sbbot(inode->i_sb);
+ for (bindex = 0; bindex <= bbot; bindex++)
+ if (au_hinode(iinfo, bindex)->hi_inode) {
+ iinfo->ii_btop = bindex;
+ break;
+ }
+ if (iinfo->ii_btop >= 0)
+ for (bindex = bbot; bindex >= iinfo->ii_btop; bindex--)
+ if (au_hinode(iinfo, bindex)->hi_inode) {
+ iinfo->ii_bbot = bindex;
+ break;
+ }
+ AuDebugOn(iinfo->ii_btop > iinfo->ii_bbot);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_icntnr_init_once(void *_c)
+{
+ struct au_icntnr *c = _c;
+ struct au_iinfo *iinfo = &c->iinfo;
+
+ spin_lock_init(&iinfo->ii_generation.ig_spin);
+ au_rw_init(&iinfo->ii_rwsem);
+ inode_init_once(&c->vfs_inode);
+}
+
+void au_hinode_init(struct au_hinode *hinode)
+{
+ hinode->hi_inode = NULL;
+ hinode->hi_id = -1;
+ au_hn_init(hinode);
+ hinode->hi_whdentry = NULL;
+}
+
+int au_iinfo_init(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ struct super_block *sb;
+ struct au_hinode *hi;
+ int nbr, i;
+
+ sb = inode->i_sb;
+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+ nbr = au_sbbot(sb) + 1;
+ if (unlikely(nbr <= 0))
+ nbr = 1;
+ hi = kmalloc_array(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS);
+ if (hi) {
+ au_lcnt_inc(&au_sbi(sb)->si_ninodes);
+
+ iinfo->ii_hinode = hi;
+ for (i = 0; i < nbr; i++, hi++)
+ au_hinode_init(hi);
+
+ iinfo->ii_generation.ig_generation = au_sigen(sb);
+ iinfo->ii_btop = -1;
+ iinfo->ii_bbot = -1;
+ iinfo->ii_vdir = NULL;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+int au_hinode_realloc(struct au_iinfo *iinfo, int nbr, int may_shrink)
+{
+ int err, i;
+ struct au_hinode *hip;
+
+ AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+ err = -ENOMEM;
+ hip = au_krealloc(iinfo->ii_hinode, sizeof(*hip) * nbr, GFP_NOFS,
+ may_shrink);
+ if (hip) {
+ iinfo->ii_hinode = hip;
+ i = iinfo->ii_bbot + 1;
+ hip += i;
+ for (; i < nbr; i++, hip++)
+ au_hinode_init(hip);
+ err = 0;
+ }
+
+ return err;
+}
+
+void au_iinfo_fin(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ struct au_hinode *hi;
+ struct super_block *sb;
+ aufs_bindex_t bindex, bbot;
+ const unsigned char unlinked = !inode->i_nlink;
+
+ AuDebugOn(au_is_bad_inode(inode));
+
+ sb = inode->i_sb;
+ au_lcnt_dec(&au_sbi(sb)->si_ninodes);
+ if (si_pid_test(sb))
+ au_xino_delete_inode(inode, unlinked);
+ else {
+ /*
+ * it is safe to hide the dependency between sbinfo and
+ * sb->s_umount.
+ */
+ lockdep_off();
+ si_noflush_read_lock(sb);
+ au_xino_delete_inode(inode, unlinked);
+ si_read_unlock(sb);
+ lockdep_on();
+ }
+
+ iinfo = au_ii(inode);
+ if (iinfo->ii_vdir)
+ au_vdir_free(iinfo->ii_vdir);
+
+ bindex = iinfo->ii_btop;
+ if (bindex >= 0) {
+ hi = au_hinode(iinfo, bindex);
+ bbot = iinfo->ii_bbot;
+ while (bindex++ <= bbot) {
+ if (hi->hi_inode)
+ au_hiput(hi);
+ hi++;
+ }
+ }
+ au_kfree_rcu(iinfo->ii_hinode);
+ AuRwDestroy(&iinfo->ii_rwsem);
+}
diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c
new file mode 100644
index 000000000000..ee1e6756a2f5
--- /dev/null
+++ b/fs/aufs/inode.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode functions
+ */
+
+#include "aufs.h"
+
+struct inode *au_igrab(struct inode *inode)
+{
+ if (inode) {
+ AuDebugOn(!atomic_read(&inode->i_count));
+ ihold(inode);
+ }
+ return inode;
+}
+
+static void au_refresh_hinode_attr(struct inode *inode, int do_version)
+{
+ au_cpup_attr_all(inode, /*force*/0);
+ au_update_iigen(inode, /*half*/1);
+ if (do_version)
+ inode_inc_iversion(inode);
+}
+
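+/*
+ * re-map the array of lower inodes after the branch list has changed: every
+ * slot is moved to the index its branch id maps to now, and slots whose
+ * branch was removed are put; *update tells the caller whether anything was
+ * dropped.
+ */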
+static int au_ii_refresh(struct inode *inode, int *update)
+{
+ int err, e, nbr;
+ umode_t type;
+ aufs_bindex_t bindex, new_bindex;
+ struct super_block *sb;
+ struct au_iinfo *iinfo;
+ struct au_hinode *p, *q, tmp;
+
+ AuDebugOn(au_is_bad_inode(inode));
+ IiMustWriteLock(inode);
+
+ *update = 0;
+ sb = inode->i_sb;
+ nbr = au_sbbot(sb) + 1;
+ type = inode->i_mode & S_IFMT;
+ iinfo = au_ii(inode);
+ err = au_hinode_realloc(iinfo, nbr, /*may_shrink*/0);
+ if (unlikely(err))
+ goto out;
+
+ AuDebugOn(iinfo->ii_btop < 0);
+ p = au_hinode(iinfo, iinfo->ii_btop);
+ for (bindex = iinfo->ii_btop; bindex <= iinfo->ii_bbot;
+ bindex++, p++) {
+ if (!p->hi_inode)
+ continue;
+
+ AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT));
+ new_bindex = au_br_index(sb, p->hi_id);
+ if (new_bindex == bindex)
+ continue;
+
+ if (new_bindex < 0) {
+ *update = 1;
+ au_hiput(p);
+ p->hi_inode = NULL;
+ continue;
+ }
+
+ if (new_bindex < iinfo->ii_btop)
+ iinfo->ii_btop = new_bindex;
+ if (iinfo->ii_bbot < new_bindex)
+ iinfo->ii_bbot = new_bindex;
+		/* swap the two lower inodes, and loop again */
+ q = au_hinode(iinfo, new_bindex);
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ if (tmp.hi_inode) {
+ bindex--;
+ p--;
+ }
+ }
+ au_update_ibrange(inode, /*do_put_zero*/0);
+ au_hinode_realloc(iinfo, nbr, /*may_shrink*/1); /* harmless if err */
+ e = au_dy_irefresh(inode);
+ if (unlikely(e && !err))
+ err = e;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+void au_refresh_iop(struct inode *inode, int force_getattr)
+{
+ int type;
+ struct au_sbinfo *sbi = au_sbi(inode->i_sb);
+ const struct inode_operations *iop
+ = force_getattr ? aufs_iop : sbi->si_iop_array;
+
+ if (inode->i_op == iop)
+ return;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFDIR:
+ type = AuIop_DIR;
+ break;
+ case S_IFLNK:
+ type = AuIop_SYMLINK;
+ break;
+ default:
+ type = AuIop_OTHER;
+ break;
+ }
+
+ inode->i_op = iop + type;
+ /* unnecessary smp_wmb() */
+}
+
+int au_refresh_hinode_self(struct inode *inode)
+{
+ int err, update;
+
+ err = au_ii_refresh(inode, &update);
+ if (!err)
+ au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode));
+
+ AuTraceErr(err);
+ return err;
+}
+
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
+{
+ int err, e, update;
+ unsigned int flags;
+ umode_t mode;
+ aufs_bindex_t bindex, bbot;
+ unsigned char isdir;
+ struct au_hinode *p;
+ struct au_iinfo *iinfo;
+
+ err = au_ii_refresh(inode, &update);
+ if (unlikely(err))
+ goto out;
+
+ update = 0;
+ iinfo = au_ii(inode);
+ p = au_hinode(iinfo, iinfo->ii_btop);
+ mode = (inode->i_mode & S_IFMT);
+ isdir = S_ISDIR(mode);
+ flags = au_hi_flags(inode, isdir);
+ bbot = au_dbbot(dentry);
+ for (bindex = au_dbtop(dentry); bindex <= bbot; bindex++) {
+ struct inode *h_i, *h_inode;
+ struct dentry *h_d;
+
+ h_d = au_h_dptr(dentry, bindex);
+ if (!h_d || d_is_negative(h_d))
+ continue;
+
+ h_inode = d_inode(h_d);
+ AuDebugOn(mode != (h_inode->i_mode & S_IFMT));
+ if (iinfo->ii_btop <= bindex && bindex <= iinfo->ii_bbot) {
+ h_i = au_h_iptr(inode, bindex);
+ if (h_i) {
+ if (h_i == h_inode)
+ continue;
+ err = -EIO;
+ break;
+ }
+ }
+ if (bindex < iinfo->ii_btop)
+ iinfo->ii_btop = bindex;
+ if (iinfo->ii_bbot < bindex)
+ iinfo->ii_bbot = bindex;
+ au_set_h_iptr(inode, bindex, au_igrab(h_inode), flags);
+ update = 1;
+ }
+ au_update_ibrange(inode, /*do_put_zero*/0);
+ e = au_dy_irefresh(inode);
+ if (unlikely(e && !err))
+ err = e;
+ if (!err)
+ au_refresh_hinode_attr(inode, update && isdir);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int set_inode(struct inode *inode, struct dentry *dentry)
+{
+ int err;
+ unsigned int flags;
+ umode_t mode;
+ aufs_bindex_t bindex, btop, btail;
+ unsigned char isdir;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ struct au_iinfo *iinfo;
+ struct inode_operations *iop;
+
+ IiMustWriteLock(inode);
+
+ err = 0;
+ isdir = 0;
+ iop = au_sbi(inode->i_sb)->si_iop_array;
+ btop = au_dbtop(dentry);
+ h_dentry = au_h_dptr(dentry, btop);
+ h_inode = d_inode(h_dentry);
+ mode = h_inode->i_mode;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ btail = au_dbtail(dentry);
+ inode->i_op = iop + AuIop_OTHER;
+ inode->i_fop = &aufs_file_fop;
+ err = au_dy_iaop(inode, btop, h_inode);
+ if (unlikely(err))
+ goto out;
+ break;
+ case S_IFDIR:
+ isdir = 1;
+ btail = au_dbtaildir(dentry);
+ inode->i_op = iop + AuIop_DIR;
+ inode->i_fop = &aufs_dir_fop;
+ break;
+ case S_IFLNK:
+ btail = au_dbtail(dentry);
+ inode->i_op = iop + AuIop_SYMLINK;
+ break;
+ case S_IFBLK:
+ case S_IFCHR:
+ case S_IFIFO:
+ case S_IFSOCK:
+ btail = au_dbtail(dentry);
+ inode->i_op = iop + AuIop_OTHER;
+ init_special_inode(inode, mode, h_inode->i_rdev);
+ break;
+ default:
+ AuIOErr("Unknown file type 0%o\n", mode);
+ err = -EIO;
+ goto out;
+ }
+
+ /* do not set hnotify for whiteouted dirs (SHWH mode) */
+ flags = au_hi_flags(inode, isdir);
+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
+ && au_ftest_hi(flags, HNOTIFY)
+ && dentry->d_name.len > AUFS_WH_PFX_LEN
+ && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
+ au_fclr_hi(flags, HNOTIFY);
+ iinfo = au_ii(inode);
+ iinfo->ii_btop = btop;
+ iinfo->ii_bbot = btail;
+ for (bindex = btop; bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry)
+ au_set_h_iptr(inode, bindex,
+ au_igrab(d_inode(h_dentry)), flags);
+ }
+ au_cpup_attr_all(inode, /*force*/1);
+ /*
+ * to force calling aufs_get_acl() every time,
+	 * do not call cache_no_acl() for an aufs inode.
+ */
+
+out:
+ return err;
+}
+
+/*
+ * successful returns with iinfo write_locked
+ * minus: errno
+ * zero: success, matched
+ * plus: no error, but unmatched
+ */
+static int reval_inode(struct inode *inode, struct dentry *dentry)
+{
+ int err;
+ unsigned int gen, igflags;
+ aufs_bindex_t bindex, bbot;
+ struct inode *h_inode, *h_dinode;
+ struct dentry *h_dentry;
+
+ /*
+	 * before this function, if aufs holds any iinfo lock, it must be the
+	 * only one, for the parent dir.
+	 * this can happen with UDBA and an obsoleted inode number.
+ */
+ err = -EIO;
+ if (unlikely(inode->i_ino == parent_ino(dentry)))
+ goto out;
+
+ err = 1;
+ ii_write_lock_new_child(inode);
+ h_dentry = au_h_dptr(dentry, au_dbtop(dentry));
+ h_dinode = d_inode(h_dentry);
+ bbot = au_ibbot(inode);
+ for (bindex = au_ibtop(inode); bindex <= bbot; bindex++) {
+ h_inode = au_h_iptr(inode, bindex);
+ if (!h_inode || h_inode != h_dinode)
+ continue;
+
+ err = 0;
+ gen = au_iigen(inode, &igflags);
+ if (gen == au_digen(dentry)
+ && !au_ig_ftest(igflags, HALF_REFRESHED))
+ break;
+
+ /* fully refresh inode using dentry */
+ err = au_refresh_hinode(inode, dentry);
+ if (!err)
+ au_update_iigen(inode, /*half*/0);
+ break;
+ }
+
+ if (unlikely(err))
+ ii_write_unlock(inode);
+out:
+ return err;
+}
+
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ unsigned int d_type, ino_t *ino)
+{
+ int err, idx;
+ const int isnondir = d_type != DT_DIR;
+
+	/* prevent a race condition on the hardlinked inode number */
+ if (isnondir) {
+ err = au_xinondir_enter(sb, bindex, h_ino, &idx);
+ if (unlikely(err))
+ goto out;
+ }
+
+ err = au_xino_read(sb, bindex, h_ino, ino);
+ if (unlikely(err))
+ goto out_xinondir;
+
+ if (!*ino) {
+ err = -EIO;
+ *ino = au_xino_new_ino(sb);
+ if (unlikely(!*ino))
+ goto out_xinondir;
+ err = au_xino_write(sb, bindex, h_ino, *ino);
+ if (unlikely(err))
+ goto out_xinondir;
+ }
+
+out_xinondir:
+ if (isnondir && idx >= 0)
+ au_xinondir_leave(sb, bindex, h_ino, idx);
+out:
+ return err;
+}
+
+/* successful returns with iinfo write_locked */
+/* todo: return with unlocked? */
+struct inode *au_new_inode(struct dentry *dentry, int must_new)
+{
+ struct inode *inode, *h_inode;
+ struct dentry *h_dentry;
+ struct super_block *sb;
+ ino_t h_ino, ino;
+ int err, idx, hlinked;
+ aufs_bindex_t btop;
+
+ sb = dentry->d_sb;
+ btop = au_dbtop(dentry);
+ h_dentry = au_h_dptr(dentry, btop);
+ h_inode = d_inode(h_dentry);
+ h_ino = h_inode->i_ino;
+ hlinked = !d_is_dir(h_dentry) && h_inode->i_nlink > 1;
+
+new_ino:
+ /*
+	 * stop racing between hardlinks under different
+	 * parents.
+ */
+ if (hlinked) {
+ err = au_xinondir_enter(sb, btop, h_ino, &idx);
+ inode = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+ }
+
+ err = au_xino_read(sb, btop, h_ino, &ino);
+ inode = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_xinondir;
+
+ if (!ino) {
+ ino = au_xino_new_ino(sb);
+ if (unlikely(!ino)) {
+ inode = ERR_PTR(-EIO);
+ goto out_xinondir;
+ }
+ }
+
+ AuDbg("i%lu\n", (unsigned long)ino);
+ inode = au_iget_locked(sb, ino);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_xinondir;
+
+ AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
+ if (inode->i_state & I_NEW) {
+ ii_write_lock_new_child(inode);
+ err = set_inode(inode, dentry);
+ if (!err) {
+ unlock_new_inode(inode);
+ goto out_xinondir; /* success */
+ }
+
+ /*
+ * iget_failed() calls iput(), but we need to call
+		 * ii_write_unlock() after iget_failed(). so this is a dirty
+		 * hack for i_count.
+ */
+ atomic_inc(&inode->i_count);
+ iget_failed(inode);
+ ii_write_unlock(inode);
+ au_xino_write(sb, btop, h_ino, /*ino*/0);
+ /* ignore this error */
+ goto out_iput;
+ } else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) {
+ /*
+ * horrible race condition between lookup, readdir and copyup
+ * (or something).
+ */
+ if (hlinked && idx >= 0)
+ au_xinondir_leave(sb, btop, h_ino, idx);
+ err = reval_inode(inode, dentry);
+ if (unlikely(err < 0)) {
+ hlinked = 0;
+ goto out_iput;
+ }
+ if (!err)
+ goto out; /* success */
+ else if (hlinked && idx >= 0) {
+ err = au_xinondir_enter(sb, btop, h_ino, &idx);
+ if (unlikely(err)) {
+ iput(inode);
+ inode = ERR_PTR(err);
+ goto out;
+ }
+ }
+ }
+
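+	/*
+	 * the aufs inode found via the xino table does not match (or must
+	 * not be reused), so forget the stale mapping, allocate a fresh
+	 * inode number and retry.
+	 */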
+ if (unlikely(au_test_fs_unique_ino(h_inode)))
+ AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
+ " b%d, %s, %pd, hi%lu, i%lu.\n",
+ btop, au_sbtype(h_dentry->d_sb), dentry,
+ (unsigned long)h_ino, (unsigned long)ino);
+ ino = 0;
+ err = au_xino_write(sb, btop, h_ino, /*ino*/0);
+ if (!err) {
+ iput(inode);
+ if (hlinked && idx >= 0)
+ au_xinondir_leave(sb, btop, h_ino, idx);
+ goto new_ino;
+ }
+
+out_iput:
+ iput(inode);
+ inode = ERR_PTR(err);
+out_xinondir:
+ if (hlinked && idx >= 0)
+ au_xinondir_leave(sb, btop, h_ino, idx);
+out:
+ return inode;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+ struct inode *inode)
+{
+ int err;
+ struct inode *hi;
+
+ err = au_br_rdonly(au_sbr(sb, bindex));
+
+	/* a pseudo-link after being flushed may happen to be out of bounds */
+ if (!err
+ && inode
+ && au_ibtop(inode) <= bindex
+ && bindex <= au_ibbot(inode)) {
+ /*
+		 * the permission check is unnecessary since the vfsub
+		 * routine will be called later
+ */
+ hi = au_h_iptr(inode, bindex);
+ if (hi)
+ err = IS_IMMUTABLE(hi) ? -EROFS : 0;
+ }
+
+ return err;
+}
+
+int au_test_h_perm(struct inode *h_inode, int mask)
+{
+ if (uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
+ return 0;
+ return inode_permission(h_inode, mask);
+}
+
+int au_test_h_perm_sio(struct inode *h_inode, int mask)
+{
+ if (au_test_nfs(h_inode->i_sb)
+ && (mask & MAY_WRITE)
+ && S_ISDIR(h_inode->i_mode))
+ mask |= MAY_READ; /* force permission check */
+ return au_test_h_perm(h_inode, mask);
+}
diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h
new file mode 100644
index 000000000000..a6bc2768d3e7
--- /dev/null
+++ b/fs/aufs/inode.h
@@ -0,0 +1,698 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations
+ */
+
+#ifndef __AUFS_INODE_H__
+#define __AUFS_INODE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fsnotify.h>
+#include "rwsem.h"
+
+struct vfsmount;
+
+struct au_hnotify {
+#ifdef CONFIG_AUFS_HNOTIFY
+#ifdef CONFIG_AUFS_HFSNOTIFY
+ /* never use fsnotify_add_vfsmount_mark() */
+ struct fsnotify_mark hn_mark;
+#endif
+ struct inode *hn_aufs_inode; /* no get/put */
+ struct rcu_head rcu;
+#endif
+} ____cacheline_aligned_in_smp;
+
+struct au_hinode {
+ struct inode *hi_inode;
+ aufs_bindex_t hi_id;
+#ifdef CONFIG_AUFS_HNOTIFY
+ struct au_hnotify *hi_notify;
+#endif
+
+ /* reference to the copied-up whiteout with get/put */
+ struct dentry *hi_whdentry;
+};
+
+/* ig_flags */
+#define AuIG_HALF_REFRESHED 1
+#define au_ig_ftest(flags, name) ((flags) & AuIG_##name)
+#define au_ig_fset(flags, name) \
+ do { (flags) |= AuIG_##name; } while (0)
+#define au_ig_fclr(flags, name) \
+ do { (flags) &= ~AuIG_##name; } while (0)
+
+struct au_iigen {
+ spinlock_t ig_spin;
+ __u32 ig_generation, ig_flags;
+};
+
+struct au_vdir;
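+/*
+ * per-inode information: ii_hinode is the array of lower inodes, indexed by
+ * branch; [ii_btop, ii_bbot] is the valid range, protected by ii_rwsem.
+ */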
+struct au_iinfo {
+ struct au_iigen ii_generation;
+ struct super_block *ii_hsb1; /* no get/put */
+
+ struct au_rwsem ii_rwsem;
+ aufs_bindex_t ii_btop, ii_bbot;
+ __u32 ii_higen;
+ struct au_hinode *ii_hinode;
+ struct au_vdir *ii_vdir;
+};
+
+struct au_icntnr {
+ struct au_iinfo iinfo;
+ struct inode vfs_inode;
+ struct hlist_bl_node plink;
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
+/* au_pin flags */
+#define AuPin_DI_LOCKED 1
+#define AuPin_MNT_WRITE (1 << 1)
+#define au_ftest_pin(flags, name) ((flags) & AuPin_##name)
+#define au_fset_pin(flags, name) \
+ do { (flags) |= AuPin_##name; } while (0)
+#define au_fclr_pin(flags, name) \
+ do { (flags) &= ~AuPin_##name; } while (0)
+
+struct au_pin {
+ /* input */
+ struct dentry *dentry;
+ unsigned int udba;
+ unsigned char lsc_di, lsc_hi, flags;
+ aufs_bindex_t bindex;
+
+ /* output */
+ struct dentry *parent;
+ struct au_hinode *hdir;
+ struct vfsmount *h_mnt;
+
+ /* temporary unlock/relock for copyup */
+ struct dentry *h_dentry, *h_parent;
+ struct au_branch *br;
+ struct task_struct *task;
+};
+
+void au_pin_hdir_unlock(struct au_pin *p);
+int au_pin_hdir_lock(struct au_pin *p);
+int au_pin_hdir_relock(struct au_pin *p);
+void au_pin_hdir_acquire_nest(struct au_pin *p);
+void au_pin_hdir_release(struct au_pin *p);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_iinfo *au_ii(struct inode *inode)
+{
+ BUG_ON(is_bad_inode(inode));
+ return &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* inode.c */
+struct inode *au_igrab(struct inode *inode);
+void au_refresh_iop(struct inode *inode, int force_getattr);
+int au_refresh_hinode_self(struct inode *inode);
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry);
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ unsigned int d_type, ino_t *ino);
+struct inode *au_new_inode(struct dentry *dentry, int must_new);
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+ struct inode *inode);
+int au_test_h_perm(struct inode *h_inode, int mask);
+int au_test_h_perm_sio(struct inode *h_inode, int mask);
+
+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex,
+ ino_t h_ino, unsigned int d_type, ino_t *ino)
+{
+#ifdef CONFIG_AUFS_SHWH
+ return au_ino(sb, bindex, h_ino, d_type, ino);
+#else
+ return 0;
+#endif
+}
+
+/* i_op.c */
+enum {
+ AuIop_SYMLINK,
+ AuIop_DIR,
+ AuIop_OTHER,
+ AuIop_Last
+};
+extern struct inode_operations aufs_iop[AuIop_Last],
+ aufs_iop_nogetattr[AuIop_Last];
+
+/* au_wr_dir flags */
+#define AuWrDir_ADD_ENTRY 1
+#define AuWrDir_ISDIR (1 << 1)
+#define AuWrDir_TMPFILE (1 << 2)
+#define au_ftest_wrdir(flags, name) ((flags) & AuWrDir_##name)
+#define au_fset_wrdir(flags, name) \
+ do { (flags) |= AuWrDir_##name; } while (0)
+#define au_fclr_wrdir(flags, name) \
+ do { (flags) &= ~AuWrDir_##name; } while (0)
+
+struct au_wr_dir_args {
+ aufs_bindex_t force_btgt;
+ unsigned char flags;
+};
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+ struct au_wr_dir_args *args);
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin);
+void au_pin_init(struct au_pin *pin, struct dentry *dentry,
+ aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+ unsigned int udba, unsigned char flags);
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int udba, unsigned char flags) __must_check;
+int au_do_pin(struct au_pin *pin) __must_check;
+void au_unpin(struct au_pin *pin);
+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen);
+
+#define AuIcpup_DID_CPUP 1
+#define au_ftest_icpup(flags, name) ((flags) & AuIcpup_##name)
+#define au_fset_icpup(flags, name) \
+ do { (flags) |= AuIcpup_##name; } while (0)
+#define au_fclr_icpup(flags, name) \
+ do { (flags) &= ~AuIcpup_##name; } while (0)
+
+struct au_icpup_args {
+ unsigned char flags;
+ unsigned char pin_flags;
+ aufs_bindex_t btgt;
+ unsigned int udba;
+ struct au_pin pin;
+ struct path h_path;
+ struct inode *h_inode;
+};
+
+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+ struct au_icpup_args *a);
+
+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path,
+ int locked);
+
+/* i_op_add.c */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir);
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t dev);
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname);
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool want_excl);
+struct vfsub_aopen_args;
+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *args);
+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode);
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+ struct dentry *dentry);
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+
+/* i_op_del.c */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup);
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir);
+int aufs_unlink(struct inode *dir, struct dentry *dentry);
+int aufs_rmdir(struct inode *dir, struct dentry *dentry);
+
+/* i_op_ren.c */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt);
+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry,
+ struct inode *dir, struct dentry *dentry,
+ unsigned int flags);
+
+/* iinfo.c */
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex);
+void au_hiput(struct au_hinode *hinode);
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_wh);
+unsigned int au_hi_flags(struct inode *inode, int isdir);
+
+/* hinode flags */
+#define AuHi_XINO 1
+#define AuHi_HNOTIFY (1 << 1)
+#define au_ftest_hi(flags, name) ((flags) & AuHi_##name)
+#define au_fset_hi(flags, name) \
+ do { (flags) |= AuHi_##name; } while (0)
+#define au_fclr_hi(flags, name) \
+ do { (flags) &= ~AuHi_##name; } while (0)
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuHi_HNOTIFY
+#define AuHi_HNOTIFY 0
+#endif
+
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode, unsigned int flags);
+
+void au_update_iigen(struct inode *inode, int half);
+void au_update_ibrange(struct inode *inode, int do_put_zero);
+
+void au_icntnr_init_once(void *_c);
+void au_hinode_init(struct au_hinode *hinode);
+int au_iinfo_init(struct inode *inode);
+void au_iinfo_fin(struct inode *inode);
+int au_hinode_realloc(struct au_iinfo *iinfo, int nbr, int may_shrink);
+
+#ifdef CONFIG_PROC_FS
+/* plink.c */
+int au_plink_maint(struct super_block *sb, int flags);
+struct au_sbinfo;
+void au_plink_maint_leave(struct au_sbinfo *sbinfo);
+int au_plink_maint_enter(struct super_block *sb);
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb);
+#else
+AuStubVoid(au_plink_list, struct super_block *sb)
+#endif
+int au_plink_test(struct inode *inode);
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex);
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_dentry);
+void au_plink_put(struct super_block *sb, int verbose);
+void au_plink_clean(struct super_block *sb, int verbose);
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id);
+#else
+AuStubInt0(au_plink_maint, struct super_block *sb, int flags);
+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo);
+AuStubInt0(au_plink_maint_enter, struct super_block *sb);
+AuStubVoid(au_plink_list, struct super_block *sb);
+AuStubInt0(au_plink_test, struct inode *inode);
+AuStub(struct dentry *, au_plink_lkup, return NULL,
+ struct inode *inode, aufs_bindex_t bindex);
+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_dentry);
+AuStubVoid(au_plink_put, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id);
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_AUFS_XATTR
+/* xattr.c */
+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
+ unsigned int verbose);
+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size);
+void au_xattr_init(struct super_block *sb);
+#else
+AuStubInt0(au_cpup_xattr, struct dentry *h_dst, struct dentry *h_src,
+ int ignore_flags, unsigned int verbose);
+AuStubVoid(au_xattr_init, struct super_block *sb);
+#endif
+
+#ifdef CONFIG_FS_POSIX_ACL
+struct posix_acl *aufs_get_acl(struct inode *inode, int type);
+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+#endif
+
+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
+enum {
+ AU_XATTR_SET,
+ AU_ACL_SET
+};
+
+struct au_sxattr {
+ int type;
+ union {
+ struct {
+ const char *name;
+ const void *value;
+ size_t size;
+ int flags;
+ } set;
+ struct {
+ struct posix_acl *acl;
+ int type;
+ } acl_set;
+ } u;
+};
+ssize_t au_sxattr(struct dentry *dentry, struct inode *inode,
+ struct au_sxattr *arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for iinfo */
+enum {
+ AuLsc_II_CHILD, /* child first */
+ AuLsc_II_CHILD2, /* rename(2), link(2), and cpup at hnotify */
+ AuLsc_II_CHILD3, /* copyup dirs */
+ AuLsc_II_PARENT, /* see AuLsc_I_PARENT in vfsub.h */
+ AuLsc_II_PARENT2,
+ AuLsc_II_PARENT3, /* copyup dirs */
+ AuLsc_II_NEW_CHILD
+};
+
+/*
+ * ii_read_lock_child, ii_write_lock_child,
+ * ii_read_lock_child2, ii_write_lock_child2,
+ * ii_read_lock_child3, ii_write_lock_child3,
+ * ii_read_lock_parent, ii_write_lock_parent,
+ * ii_read_lock_parent2, ii_write_lock_parent2,
+ * ii_read_lock_parent3, ii_write_lock_parent3,
+ * ii_read_lock_new_child, ii_write_lock_new_child,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void ii_read_lock_##name(struct inode *i) \
+{ \
+ au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void ii_write_lock_##name(struct inode *i) \
+{ \
+ au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuRWLockFuncs(name, lsc) \
+ AuReadLockFunc(name, lsc) \
+ AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+AuRWLockFuncs(new_child, NEW_CHILD);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define ii_read_unlock(i) au_rw_read_unlock(&au_ii(i)->ii_rwsem)
+#define ii_write_unlock(i) au_rw_write_unlock(&au_ii(i)->ii_rwsem)
+#define ii_downgrade_lock(i) au_rw_dgrade_lock(&au_ii(i)->ii_rwsem)
+
+#define IiMustNoWaiters(i) AuRwMustNoWaiters(&au_ii(i)->ii_rwsem)
+#define IiMustAnyLock(i) AuRwMustAnyLock(&au_ii(i)->ii_rwsem)
+#define IiMustWriteLock(i) AuRwMustWriteLock(&au_ii(i)->ii_rwsem)
+
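+/*
+ * Editorial example (not part of the original patch): a minimal sketch of how
+ * the generated lock helpers and the debug assertion macros above are meant
+ * to be used together.
+ */
+#if 0 /* illustration only, never compiled */
+static inline void example_ii_lock(struct inode *inode)
+{
+	ii_write_lock_child(inode);
+	IiMustWriteLock(inode);	/* debug-time assertion on the rwsem state */
+	/* ... update the aufs inode info under the write lock ... */
+	ii_write_unlock(inode);
+}
+#endif
+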
+/* ---------------------------------------------------------------------- */
+
+static inline void au_icntnr_init(struct au_icntnr *c)
+{
+#ifdef CONFIG_AUFS_DEBUG
+ c->vfs_inode.i_mode = 0;
+#endif
+}
+
+static inline unsigned int au_iigen(struct inode *inode, unsigned int *igflags)
+{
+ unsigned int gen;
+ struct au_iinfo *iinfo;
+ struct au_iigen *iigen;
+
+ iinfo = au_ii(inode);
+ iigen = &iinfo->ii_generation;
+ spin_lock(&iigen->ig_spin);
+ if (igflags)
+ *igflags = iigen->ig_flags;
+ gen = iigen->ig_generation;
+ spin_unlock(&iigen->ig_spin);
+
+ return gen;
+}
+
+/* tiny test for inode number */
+/* tmpfs generation is too rough */
+static inline int au_test_higen(struct inode *inode, struct inode *h_inode)
+{
+ struct au_iinfo *iinfo;
+
+ iinfo = au_ii(inode);
+ AuRwMustAnyLock(&iinfo->ii_rwsem);
+ return !(iinfo->ii_hsb1 == h_inode->i_sb
+ && iinfo->ii_higen == h_inode->i_generation);
+}
+
+static inline void au_iigen_dec(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ struct au_iigen *iigen;
+
+ iinfo = au_ii(inode);
+ iigen = &iinfo->ii_generation;
+ spin_lock(&iigen->ig_spin);
+ iigen->ig_generation--;
+ spin_unlock(&iigen->ig_spin);
+}
+
+static inline int au_iigen_test(struct inode *inode, unsigned int sigen)
+{
+ int err;
+
+ err = 0;
+ if (unlikely(inode && au_iigen(inode, NULL) != sigen))
+ err = -EIO;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_hinode *au_hinode(struct au_iinfo *iinfo,
+ aufs_bindex_t bindex)
+{
+ return iinfo->ii_hinode + bindex;
+}
+
+static inline int au_is_bad_inode(struct inode *inode)
+{
+ return !!(is_bad_inode(inode) || !au_hinode(au_ii(inode), 0));
+}
+
+static inline aufs_bindex_t au_ii_br_id(struct inode *inode,
+ aufs_bindex_t bindex)
+{
+ IiMustAnyLock(inode);
+ return au_hinode(au_ii(inode), bindex)->hi_id;
+}
+
+static inline aufs_bindex_t au_ibtop(struct inode *inode)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_btop;
+}
+
+static inline aufs_bindex_t au_ibbot(struct inode *inode)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_bbot;
+}
+
+static inline struct au_vdir *au_ivdir(struct inode *inode)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_vdir;
+}
+
+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustAnyLock(inode);
+ return au_hinode(au_ii(inode), bindex)->hi_whdentry;
+}
+
+static inline void au_set_ibtop(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustWriteLock(inode);
+ au_ii(inode)->ii_btop = bindex;
+}
+
+static inline void au_set_ibbot(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustWriteLock(inode);
+ au_ii(inode)->ii_bbot = bindex;
+}
+
+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir)
+{
+ IiMustWriteLock(inode);
+ au_ii(inode)->ii_vdir = vdir;
+}
+
+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustAnyLock(inode);
+ return au_hinode(au_ii(inode), bindex);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_pinned_parent(struct au_pin *pin)
+{
+ if (pin)
+ return pin->parent;
+ return NULL;
+}
+
+static inline struct inode *au_pinned_h_dir(struct au_pin *pin)
+{
+ if (pin && pin->hdir)
+ return pin->hdir->hi_inode;
+ return NULL;
+}
+
+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin)
+{
+ if (pin)
+ return pin->hdir;
+ return NULL;
+}
+
+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry)
+{
+ if (pin)
+ pin->dentry = dentry;
+}
+
+static inline void au_pin_set_parent_lflag(struct au_pin *pin,
+ unsigned char lflag)
+{
+ if (pin) {
+ if (lflag)
+ au_fset_pin(pin->flags, DI_LOCKED);
+ else
+ au_fclr_pin(pin->flags, DI_LOCKED);
+ }
+}
+
+#if 0 /* reserved */
+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent)
+{
+ if (pin) {
+ dput(pin->parent);
+ pin->parent = dget(parent);
+ }
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_branch;
+#ifdef CONFIG_AUFS_HNOTIFY
+struct au_hnotify_op {
+ void (*ctl)(struct au_hinode *hinode, int do_set);
+ int (*alloc)(struct au_hinode *hinode);
+
+ /*
+	 * if it returns true, the caller should free hinode->hi_notify,
+ * otherwise ->free() frees it.
+ */
+ int (*free)(struct au_hinode *hinode,
+ struct au_hnotify *hn) __must_check;
+
+ void (*fin)(void);
+ int (*init)(void);
+
+ int (*reset_br)(unsigned int udba, struct au_branch *br, int perm);
+ void (*fin_br)(struct au_branch *br);
+ int (*init_br)(struct au_branch *br, int perm);
+};
+
+/* hnotify.c */
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode);
+void au_hn_free(struct au_hinode *hinode);
+void au_hn_ctl(struct au_hinode *hinode, int do_set);
+void au_hn_reset(struct inode *inode, unsigned int flags);
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+ struct qstr *h_child_qstr, struct inode *h_child_inode);
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm);
+int au_hnotify_init_br(struct au_branch *br, int perm);
+void au_hnotify_fin_br(struct au_branch *br);
+int __init au_hnotify_init(void);
+void au_hnotify_fin(void);
+
+/* hfsnotify.c */
+extern const struct au_hnotify_op au_hnotify_op;
+
+static inline
+void au_hn_init(struct au_hinode *hinode)
+{
+ hinode->hi_notify = NULL;
+}
+
+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
+{
+ return hinode->hi_notify;
+}
+
+#else
+AuStub(int, au_hn_alloc, return -EOPNOTSUPP,
+ struct au_hinode *hinode __maybe_unused,
+ struct inode *inode __maybe_unused)
+AuStub(struct au_hnotify *, au_hn, return NULL, struct au_hinode *hinode)
+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused)
+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused,
+ int do_set __maybe_unused)
+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused,
+ unsigned int flags __maybe_unused)
+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused,
+ struct au_branch *br __maybe_unused,
+ int perm __maybe_unused)
+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused,
+ int perm __maybe_unused)
+AuStubVoid(au_hnotify_fin_br, struct au_branch *br __maybe_unused)
+AuStubInt0(__init au_hnotify_init, void)
+AuStubVoid(au_hnotify_fin, void)
+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+static inline void au_hn_suspend(struct au_hinode *hdir)
+{
+ au_hn_ctl(hdir, /*do_set*/0);
+}
+
+static inline void au_hn_resume(struct au_hinode *hdir)
+{
+ au_hn_ctl(hdir, /*do_set*/1);
+}
+
+static inline void au_hn_inode_lock(struct au_hinode *hdir)
+{
+ inode_lock(hdir->hi_inode);
+ au_hn_suspend(hdir);
+}
+
+static inline void au_hn_inode_lock_nested(struct au_hinode *hdir,
+ unsigned int sc __maybe_unused)
+{
+ inode_lock_nested(hdir->hi_inode, sc);
+ au_hn_suspend(hdir);
+}
+
+#if 0 /* unused */
+#include "vfsub.h"
+static inline void au_hn_inode_lock_shared_nested(struct au_hinode *hdir,
+ unsigned int sc)
+{
+ inode_lock_shared_nested(hdir->hi_inode, sc);
+ au_hn_suspend(hdir);
+}
+#endif
+
+static inline void au_hn_inode_unlock(struct au_hinode *hdir)
+{
+ au_hn_resume(hdir);
+ inode_unlock(hdir->hi_inode);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_INODE_H__ */
diff --git a/fs/aufs/ioctl.c b/fs/aufs/ioctl.c
new file mode 100644
index 000000000000..fb2fb758283e
--- /dev/null
+++ b/fs/aufs/ioctl.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * ioctl
+ * plink-management and readdir in userspace.
+ * assist the pathconf(3) wrapper library.
+ * move-down
+ * File-based Hierarchical Storage Management.
+ */
+
+#include <linux/compat.h>
+#include <linux/file.h>
+#include "aufs.h"
+
+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
+{
+ int err, fd;
+ aufs_bindex_t wbi, bindex, bbot;
+ struct file *h_file;
+ struct super_block *sb;
+ struct dentry *root;
+ struct au_branch *br;
+ struct aufs_wbr_fd wbrfd = {
+ .oflags = au_dir_roflags,
+ .brid = -1
+ };
+ const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
+ | O_NOATIME | O_CLOEXEC;
+
+ AuDebugOn(wbrfd.oflags & ~valid);
+
+ if (arg) {
+ err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = -EINVAL;
+ AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
+ wbrfd.oflags |= au_dir_roflags;
+ AuDbg("0%o\n", wbrfd.oflags);
+ if (unlikely(wbrfd.oflags & ~valid))
+ goto out;
+ }
+
+ fd = get_unused_fd_flags(0);
+ err = fd;
+ if (unlikely(fd < 0))
+ goto out;
+
+ h_file = ERR_PTR(-EINVAL);
+ wbi = 0;
+ br = NULL;
+ sb = path->dentry->d_sb;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_IR);
+ bbot = au_sbbot(sb);
+ if (wbrfd.brid >= 0) {
+ wbi = au_br_index(sb, wbrfd.brid);
+ if (unlikely(wbi < 0 || wbi > bbot))
+ goto out_unlock;
+ }
+
+ h_file = ERR_PTR(-ENOENT);
+ br = au_sbr(sb, wbi);
+ if (!au_br_writable(br->br_perm)) {
+ if (arg)
+ goto out_unlock;
+
+ bindex = wbi + 1;
+ wbi = -1;
+ for (; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_writable(br->br_perm)) {
+ wbi = bindex;
+ br = au_sbr(sb, wbi);
+ break;
+ }
+ }
+ }
+ AuDbg("wbi %d\n", wbi);
+ if (wbi >= 0)
+ h_file = au_h_open(root, wbi, wbrfd.oflags, NULL,
+ /*force_wr*/0);
+
+out_unlock:
+ aufs_read_unlock(root, AuLock_IR);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out_fd;
+
+ au_lcnt_dec(&br->br_nfiles); /* cf. au_h_open() */
+ fd_install(fd, h_file);
+ err = fd;
+ goto out; /* success */
+
+out_fd:
+ put_unused_fd(fd);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
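+/*
+ * Editorial note: a hedged userspace sketch of this ioctl, assuming the aufs
+ * uapi header (linux/aufs_type.h in the aufs tree) provides AUFS_CTL_WBR_FD
+ * and struct aufs_wbr_fd; "aufs_root_fd" below is an illustrative name only.
+ */
+#if 0 /* userspace illustration, never compiled here */
+	struct aufs_wbr_fd wbrfd = { .oflags = 0, .brid = -1 };
+	int wfd = ioctl(aufs_root_fd, AUFS_CTL_WBR_FD, &wbrfd);
+	/* on success, wfd refers to the root dir of a writable branch */
+#endif
+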
+/* ---------------------------------------------------------------------- */
+
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err;
+ struct dentry *dentry;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_ioctl(file, cmd, arg);
+ break;
+
+ case AUFS_CTL_WBR_FD:
+ err = au_wbr_fd(&file->f_path, (void __user *)arg);
+ break;
+
+ case AUFS_CTL_IBUSY:
+ err = au_ibusy_ioctl(file, arg);
+ break;
+
+ case AUFS_CTL_BRINFO:
+ err = au_brinfo_ioctl(file, arg);
+ break;
+
+ case AUFS_CTL_FHSM_FD:
+ dentry = file->f_path.dentry;
+ if (IS_ROOT(dentry))
+ err = au_fhsm_fd(dentry->d_sb, arg);
+ else
+ err = -ENOTTY;
+ break;
+
+ default:
+ /* do not call the lower */
+ AuDbg("0x%x\n", cmd);
+ err = -ENOTTY;
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err;
+
+ switch (cmd) {
+ case AUFS_CTL_MVDOWN:
+ err = au_mvdown(file->f_path.dentry, (void __user *)arg);
+ break;
+
+ case AUFS_CTL_WBR_FD:
+ err = au_wbr_fd(&file->f_path, (void __user *)arg);
+ break;
+
+ default:
+ /* do not call the lower */
+ AuDbg("0x%x\n", cmd);
+ err = -ENOTTY;
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long err;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_compat_ioctl(file, cmd, arg);
+ break;
+
+ case AUFS_CTL_IBUSY:
+ err = au_ibusy_compat_ioctl(file, arg);
+ break;
+
+ case AUFS_CTL_BRINFO:
+ err = au_brinfo_compat_ioctl(file, arg);
+ break;
+
+ default:
+ err = aufs_ioctl_dir(file, cmd, arg);
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
diff --git a/fs/aufs/lcnt.h b/fs/aufs/lcnt.h
new file mode 100644
index 000000000000..d8508f875f28
--- /dev/null
+++ b/fs/aufs/lcnt.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * simple long counter wrapper
+ */
+
+#ifndef __AUFS_LCNT_H__
+#define __AUFS_LCNT_H__
+
+#ifdef __KERNEL__
+
+#include "debug.h"
+
+#define AuLCntATOMIC 1
+#define AuLCntPCPUCNT 2
+/*
+ * why does percpu_refcount require extra synchronize_rcu()s in
+ * au_br_do_free()?
+ */
+#define AuLCntPCPUREF 3
+
+/* #define AuLCntChosen AuLCntATOMIC */
+#define AuLCntChosen AuLCntPCPUCNT
+/* #define AuLCntChosen AuLCntPCPUREF */
+
+#if AuLCntChosen == AuLCntATOMIC
+#include <linux/atomic.h>
+
+typedef atomic_long_t au_lcnt_t;
+
+static inline int au_lcnt_init(au_lcnt_t *cnt, void *release __maybe_unused)
+{
+ atomic_long_set(cnt, 0);
+ return 0;
+}
+
+static inline void au_lcnt_wait_for_fin(au_lcnt_t *cnt __maybe_unused)
+{
+ /* empty */
+}
+
+static inline void au_lcnt_fin(au_lcnt_t *cnt __maybe_unused,
+ int do_sync __maybe_unused)
+{
+ /* empty */
+}
+
+static inline void au_lcnt_inc(au_lcnt_t *cnt)
+{
+ atomic_long_inc(cnt);
+}
+
+static inline void au_lcnt_dec(au_lcnt_t *cnt)
+{
+ atomic_long_dec(cnt);
+}
+
+static inline long au_lcnt_read(au_lcnt_t *cnt, int do_rev __maybe_unused)
+{
+ return atomic_long_read(cnt);
+}
+#endif
+
+#if AuLCntChosen == AuLCntPCPUCNT
+#include <linux/percpu_counter.h>
+
+typedef struct percpu_counter au_lcnt_t;
+
+static inline int au_lcnt_init(au_lcnt_t *cnt, void *release __maybe_unused)
+{
+ return percpu_counter_init(cnt, 0, GFP_NOFS);
+}
+
+static inline void au_lcnt_wait_for_fin(au_lcnt_t *cnt __maybe_unused)
+{
+ /* empty */
+}
+
+static inline void au_lcnt_fin(au_lcnt_t *cnt, int do_sync __maybe_unused)
+{
+ percpu_counter_destroy(cnt);
+}
+
+static inline void au_lcnt_inc(au_lcnt_t *cnt)
+{
+ percpu_counter_inc(cnt);
+}
+
+static inline void au_lcnt_dec(au_lcnt_t *cnt)
+{
+ percpu_counter_dec(cnt);
+}
+
+static inline long au_lcnt_read(au_lcnt_t *cnt, int do_rev __maybe_unused)
+{
+ s64 n;
+
+ n = percpu_counter_sum(cnt);
+ BUG_ON(n < 0);
+ if (LONG_MAX != LLONG_MAX
+ && n > LONG_MAX)
+ AuWarn1("%s\n", "wrap-around");
+
+ return n;
+}
+#endif
+
+#if AuLCntChosen == AuLCntPCPUREF
+#include <linux/percpu-refcount.h>
+
+typedef struct percpu_ref au_lcnt_t;
+
+static inline int au_lcnt_init(au_lcnt_t *cnt, percpu_ref_func_t *release)
+{
+ if (!release)
+ release = percpu_ref_exit;
+ return percpu_ref_init(cnt, release, /*percpu mode*/0, GFP_NOFS);
+}
+
+static inline void au_lcnt_wait_for_fin(au_lcnt_t *cnt __maybe_unused)
+{
+ synchronize_rcu();
+}
+
+static inline void au_lcnt_fin(au_lcnt_t *cnt, int do_sync)
+{
+ percpu_ref_kill(cnt);
+ if (do_sync)
+ au_lcnt_wait_for_fin(cnt);
+}
+
+static inline void au_lcnt_inc(au_lcnt_t *cnt)
+{
+ percpu_ref_get(cnt);
+}
+
+static inline void au_lcnt_dec(au_lcnt_t *cnt)
+{
+ percpu_ref_put(cnt);
+}
+
+/*
+ * avoid calling this function as much as possible.
+ */
+static inline long au_lcnt_read(au_lcnt_t *cnt, int do_rev)
+{
+ long l;
+
+ percpu_ref_switch_to_atomic_sync(cnt);
+ l = atomic_long_read(&cnt->count);
+ if (do_rev)
+ percpu_ref_switch_to_percpu(cnt);
+
+	/* percpu_ref is initialized to 1 instead of 0 */
+ return l - 1;
+}
+#endif
+
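+/*
+ * Editorial example (not part of the original patch): the wrapper API is the
+ * same regardless of which AuLCntChosen variant above is selected.
+ */
+#if 0 /* illustration only, never compiled */
+static int example_lcnt(au_lcnt_t *cnt)
+{
+	int err;
+
+	err = au_lcnt_init(cnt, /*release*/NULL);
+	if (err)
+		return err;
+	au_lcnt_inc(cnt);
+	AuDbg("count %ld\n", au_lcnt_read(cnt, /*do_rev*/1));
+	au_lcnt_dec(cnt);
+	au_lcnt_fin(cnt, /*do_sync*/1);
+	au_lcnt_wait_for_fin(cnt);
+	return 0;
+}
+#endif
+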
+#ifdef CONFIG_AUFS_DEBUG
+#define AuLCntZero(val) do { \
+ long l = val; \
+ if (l) \
+ AuDbg("%s = %ld\n", #val, l); \
+} while (0)
+#else
+#define AuLCntZero(val) do {} while (0)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_LCNT_H__ */
diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c
new file mode 100644
index 000000000000..cef7d96362c7
--- /dev/null
+++ b/fs/aufs/loop.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * support for loopback block device as a branch
+ */
+
+#include "aufs.h"
+
+/* added into drivers/block/loop.c */
+static struct file *(*backing_file_func)(struct super_block *sb);
+
+/*
+ * test if two lower dentries have overlapping branches.
+ */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding)
+{
+ struct super_block *h_sb;
+ struct file *backing_file;
+
+ if (unlikely(!backing_file_func)) {
+ /* don't load "loop" module here */
+ backing_file_func = symbol_get(loop_backing_file);
+ if (unlikely(!backing_file_func))
+ /* "loop" module is not loaded */
+ return 0;
+ }
+
+ h_sb = h_adding->d_sb;
+ backing_file = backing_file_func(h_sb);
+ if (!backing_file)
+ return 0;
+
+ h_adding = backing_file->f_path.dentry;
+ /*
+ * h_adding can be local NFS.
+ * in this case aufs cannot detect the loop.
+ */
+ if (unlikely(h_adding->d_sb == sb))
+ return 1;
+ return !!au_test_subdir(h_adding, sb->s_root);
+}
+
+/* true if a kernel thread named 'loop[0-9].*' accesses a file */
+int au_test_loopback_kthread(void)
+{
+ int ret;
+ struct task_struct *tsk = current;
+ char c, comm[sizeof(tsk->comm)];
+
+ ret = 0;
+ if (tsk->flags & PF_KTHREAD) {
+ get_task_comm(comm, tsk);
+ c = comm[4];
+ ret = ('0' <= c && c <= '9'
+ && !strncmp(comm, "loop", 4));
+ }
+
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define au_warn_loopback_step 16
+static int au_warn_loopback_nelem = au_warn_loopback_step;
+static unsigned long *au_warn_loopback_array;
+
+void au_warn_loopback(struct super_block *h_sb)
+{
+ int i, new_nelem;
+ unsigned long *a, magic;
+ static DEFINE_SPINLOCK(spin);
+
+ magic = h_sb->s_magic;
+ spin_lock(&spin);
+ a = au_warn_loopback_array;
+ for (i = 0; i < au_warn_loopback_nelem && *a; i++)
+ if (a[i] == magic) {
+ spin_unlock(&spin);
+ return;
+ }
+
+ /* h_sb is new to us, print it */
+ if (i < au_warn_loopback_nelem) {
+ a[i] = magic;
+ goto pr;
+ }
+
+ /* expand the array */
+ new_nelem = au_warn_loopback_nelem + au_warn_loopback_step;
+ a = au_kzrealloc(au_warn_loopback_array,
+ au_warn_loopback_nelem * sizeof(unsigned long),
+ new_nelem * sizeof(unsigned long), GFP_ATOMIC,
+ /*may_shrink*/0);
+ if (a) {
+ au_warn_loopback_nelem = new_nelem;
+ au_warn_loopback_array = a;
+ a[i] = magic;
+ goto pr;
+ }
+
+ spin_unlock(&spin);
+ AuWarn1("realloc failed, ignored\n");
+ return;
+
+pr:
+ spin_unlock(&spin);
+ pr_warn("you may want to try another patch for loopback file "
+ "on %s(0x%lx) branch\n", au_sbtype(h_sb), magic);
+}
+
+int au_loopback_init(void)
+{
+ int err;
+ struct super_block *sb __maybe_unused;
+
+ BUILD_BUG_ON(sizeof(sb->s_magic) != sizeof(unsigned long));
+
+ err = 0;
+ au_warn_loopback_array = kcalloc(au_warn_loopback_step,
+ sizeof(unsigned long), GFP_NOFS);
+ if (unlikely(!au_warn_loopback_array))
+ err = -ENOMEM;
+
+ return err;
+}
+
+void au_loopback_fin(void)
+{
+ if (backing_file_func)
+ symbol_put(loop_backing_file);
+ au_kfree_try_rcu(au_warn_loopback_array);
+}
diff --git a/fs/aufs/loop.h b/fs/aufs/loop.h
new file mode 100644
index 000000000000..2bd4951a7413
--- /dev/null
+++ b/fs/aufs/loop.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * support for loopback mount as a branch
+ */
+
+#ifndef __AUFS_LOOP_H__
+#define __AUFS_LOOP_H__
+
+#ifdef __KERNEL__
+
+struct dentry;
+struct super_block;
+
+#ifdef CONFIG_AUFS_BDEV_LOOP
+/* drivers/block/loop.c */
+struct file *loop_backing_file(struct super_block *sb);
+
+/* loop.c */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding);
+int au_test_loopback_kthread(void);
+void au_warn_loopback(struct super_block *h_sb);
+
+int au_loopback_init(void);
+void au_loopback_fin(void);
+#else
+AuStubInt0(au_test_loopback_overlap, struct super_block *sb,
+ struct dentry *h_adding)
+AuStubInt0(au_test_loopback_kthread, void)
+AuStubVoid(au_warn_loopback, struct super_block *h_sb)
+
+AuStubInt0(au_loopback_init, void)
+AuStubVoid(au_loopback_fin, void)
+#endif /* CONFIG_AUFS_BDEV_LOOP */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_LOOP_H__ */
diff --git a/fs/aufs/magic.mk b/fs/aufs/magic.mk
new file mode 100644
index 000000000000..7bc9eef3ffec
--- /dev/null
+++ b/fs/aufs/magic.mk
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# defined in ${srctree}/fs/fuse/inode.c
+# tristate
+ifdef CONFIG_FUSE_FS
+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546
+endif
+
+# defined in ${srctree}/fs/xfs/xfs_sb.h
+# tristate
+ifdef CONFIG_XFS_FS
+ccflags-y += -DXFS_SB_MAGIC=0x58465342
+endif
+
+# defined in ${srctree}/fs/configfs/mount.c
+# tristate
+ifdef CONFIG_CONFIGFS_FS
+ccflags-y += -DCONFIGFS_MAGIC=0x62656570
+endif
+
+# defined in ${srctree}/fs/ubifs/ubifs.h
+# tristate
+ifdef CONFIG_UBIFS_FS
+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905
+endif
+
+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h
+# tristate
+ifdef CONFIG_HFSPLUS_FS
+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b
+endif
diff --git a/fs/aufs/module.c b/fs/aufs/module.c
new file mode 100644
index 000000000000..295736adfc3c
--- /dev/null
+++ b/fs/aufs/module.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * module global variables and operations
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+/* shrinkable realloc */
+void *au_krealloc(void *p, unsigned int new_sz, gfp_t gfp, int may_shrink)
+{
+ size_t sz;
+ int diff;
+
+ sz = 0;
+ diff = -1;
+ if (p) {
+#if 0 /* unused */
+ if (!new_sz) {
+ au_kfree_rcu(p);
+ p = NULL;
+ goto out;
+ }
+#else
+ AuDebugOn(!new_sz);
+#endif
+ sz = ksize(p);
+ diff = au_kmidx_sub(sz, new_sz);
+ }
+ if (sz && !diff)
+ goto out;
+
+ if (sz < new_sz)
+ /* expand or SLOB */
+ p = krealloc(p, new_sz, gfp);
+ else if (new_sz < sz && may_shrink) {
+ /* shrink */
+ void *q;
+
+ q = kmalloc(new_sz, gfp);
+ if (q) {
+ if (p) {
+ memcpy(q, p, new_sz);
+ au_kfree_try_rcu(p);
+ }
+ p = q;
+ } else
+ p = NULL;
+ }
+
+out:
+ return p;
+}
+
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp,
+ int may_shrink)
+{
+ p = au_krealloc(p, new_sz, gfp, may_shrink);
+ if (p && new_sz > nused)
+ memset(p + nused, 0, new_sz - nused);
+ return p;
+}
+
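+/*
+ * Editorial example (not part of the original patch): growing and then
+ * shrinking a buffer with the helpers above.
+ */
+#if 0 /* illustration only, never compiled */
+static void *example_krealloc(void)
+{
+	void *p;
+
+	p = au_krealloc(NULL, 128, GFP_NOFS, /*may_shrink*/0);	/* alloc/grow */
+	if (p)
+		p = au_krealloc(p, 8, GFP_NOFS, /*may_shrink*/1);	/* shrink */
+	return p;
+}
+#endif
+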
+/* ---------------------------------------------------------------------- */
+/*
+ * aufs caches
+ */
+struct kmem_cache *au_cache[AuCache_Last];
+
+static void au_cache_fin(void)
+{
+ int i;
+
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+
+ /* excluding AuCache_HNOTIFY */
+ BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last);
+ for (i = 0; i < AuCache_HNOTIFY; i++) {
+ kmem_cache_destroy(au_cache[i]);
+ au_cache[i] = NULL;
+ }
+}
+
+static int __init au_cache_init(void)
+{
+ au_cache[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once);
+ if (au_cache[AuCache_DINFO])
+ /* SLAB_DESTROY_BY_RCU */
+ au_cache[AuCache_ICNTNR] = AuCacheCtor(au_icntnr,
+ au_icntnr_init_once);
+ if (au_cache[AuCache_ICNTNR])
+ au_cache[AuCache_FINFO] = AuCacheCtor(au_finfo,
+ au_fi_init_once);
+ if (au_cache[AuCache_FINFO])
+ au_cache[AuCache_VDIR] = AuCache(au_vdir);
+ if (au_cache[AuCache_VDIR])
+ au_cache[AuCache_DEHSTR] = AuCache(au_vdir_dehstr);
+ if (au_cache[AuCache_DEHSTR])
+ return 0;
+
+ au_cache_fin();
+ return -ENOMEM;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_dir_roflags;
+
+#ifdef CONFIG_AUFS_SBILIST
+/*
+ * iterate_supers_type() doesn't protect us from
+ * remounting (branch management)
+ */
+struct hlist_bl_head au_sbilist;
+#endif
+
+/*
+ * functions for module interface.
+ */
+MODULE_LICENSE("GPL");
+/* MODULE_LICENSE("GPL v2"); */
+MODULE_AUTHOR("Junjiro R. Okajima <aufs-users@lists.sourceforge.net>");
+MODULE_DESCRIPTION(AUFS_NAME
+ " -- Advanced multi layered unification filesystem");
+MODULE_VERSION(AUFS_VERSION);
+MODULE_ALIAS_FS(AUFS_NAME);
+
+/* this module parameter has no meaning when SYSFS is disabled */
+int sysaufs_brs = 1;
+MODULE_PARM_DESC(brs, "use <sysfs>/fs/aufs/si_*/brN");
+module_param_named(brs, sysaufs_brs, int, 0444);
+
+/* this module parameter has no meaning when USER_NS is disabled */
+bool au_userns;
+MODULE_PARM_DESC(allow_userns, "allow unprivileged to mount under userns");
+module_param_named(allow_userns, au_userns, bool, 0444);
+
+/* ---------------------------------------------------------------------- */
+
+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */
+
+int au_seq_path(struct seq_file *seq, struct path *path)
+{
+ int err;
+
+ err = seq_path(seq, path, au_esc_chars);
+ if (err >= 0)
+ err = 0;
+ else
+ err = -ENOMEM;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int __init aufs_init(void)
+{
+ int err, i;
+ char *p;
+
+ p = au_esc_chars;
+ for (i = 1; i <= ' '; i++)
+ *p++ = i;
+ *p++ = '\\';
+ *p++ = '\x7f';
+ *p = 0;
+
+ au_dir_roflags = au_file_roflags(O_DIRECTORY | O_LARGEFILE);
+
+ memcpy(aufs_iop_nogetattr, aufs_iop, sizeof(aufs_iop));
+ for (i = 0; i < AuIop_Last; i++)
+ aufs_iop_nogetattr[i].getattr = NULL;
+
+ memset(au_cache, 0, sizeof(au_cache)); /* including hnotify */
+
+ au_sbilist_init();
+ sysaufs_brs_init();
+ au_debug_init();
+ au_dy_init();
+ err = sysaufs_init();
+ if (unlikely(err))
+ goto out;
+ err = dbgaufs_init();
+ if (unlikely(err))
+ goto out_sysaufs;
+ err = au_procfs_init();
+ if (unlikely(err))
+ goto out_dbgaufs;
+ err = au_wkq_init();
+ if (unlikely(err))
+ goto out_procfs;
+ err = au_loopback_init();
+ if (unlikely(err))
+ goto out_wkq;
+ err = au_hnotify_init();
+ if (unlikely(err))
+ goto out_loopback;
+ err = au_sysrq_init();
+ if (unlikely(err))
+ goto out_hin;
+ err = au_cache_init();
+ if (unlikely(err))
+ goto out_sysrq;
+
+ aufs_fs_type.fs_flags |= au_userns ? FS_USERNS_MOUNT : 0;
+ err = register_filesystem(&aufs_fs_type);
+ if (unlikely(err))
+ goto out_cache;
+
+ /* since we define pr_fmt, call printk directly */
+ printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n");
+ goto out; /* success */
+
+out_cache:
+ au_cache_fin();
+out_sysrq:
+ au_sysrq_fin();
+out_hin:
+ au_hnotify_fin();
+out_loopback:
+ au_loopback_fin();
+out_wkq:
+ au_wkq_fin();
+out_procfs:
+ au_procfs_fin();
+out_dbgaufs:
+ dbgaufs_fin();
+out_sysaufs:
+ sysaufs_fin();
+ au_dy_fin();
+out:
+ return err;
+}
+
+static void __exit aufs_exit(void)
+{
+ unregister_filesystem(&aufs_fs_type);
+ au_cache_fin();
+ au_sysrq_fin();
+ au_hnotify_fin();
+ au_loopback_fin();
+ au_wkq_fin();
+ au_procfs_fin();
+ dbgaufs_fin();
+ sysaufs_fin();
+ au_dy_fin();
+}
+
+module_init(aufs_init);
+module_exit(aufs_exit);
diff --git a/fs/aufs/module.h b/fs/aufs/module.h
new file mode 100644
index 000000000000..3036754b90d0
--- /dev/null
+++ b/fs/aufs/module.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * module initialization and module-global
+ */
+
+#ifndef __AUFS_MODULE_H__
+#define __AUFS_MODULE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/slab.h>
+#include "debug.h"
+#include "dentry.h"
+#include "dir.h"
+#include "file.h"
+#include "inode.h"
+
+struct path;
+struct seq_file;
+
+/* module parameters */
+extern int sysaufs_brs;
+extern bool au_userns;
+
+/* ---------------------------------------------------------------------- */
+
+extern int au_dir_roflags;
+
+void *au_krealloc(void *p, unsigned int new_sz, gfp_t gfp, int may_shrink);
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp,
+ int may_shrink);
+
+/*
+ * Comparing the size of the object with sizeof(struct rcu_head)
+ * case 1: object is always larger
+ * --> au_kfree_rcu() or au_kfree_do_rcu()
+ * case 2: object is always smaller
+ * --> au_kfree_small()
+ * case 3: object can be any size
+ * --> au_kfree_try_rcu()
+ */
+
+static inline void au_kfree_do_rcu(const void *p)
+{
+ struct {
+ struct rcu_head rcu;
+ } *a = (void *)p;
+
+ kfree_rcu(a, rcu);
+}
+
+#define au_kfree_rcu(_p) do { \
+ typeof(_p) p = (_p); \
+ BUILD_BUG_ON(sizeof(*p) < sizeof(struct rcu_head)); \
+ if (p) \
+ au_kfree_do_rcu(p); \
+ } while (0)
+
+#define au_kfree_do_sz_test(sz) (sz >= sizeof(struct rcu_head))
+#define au_kfree_sz_test(p) (p && au_kfree_do_sz_test(ksize(p)))
+
+static inline void au_kfree_try_rcu(const void *p)
+{
+ if (!p)
+ return;
+ if (au_kfree_sz_test(p))
+ au_kfree_do_rcu(p);
+ else
+ kfree(p);
+}
+
+static inline void au_kfree_small(const void *p)
+{
+ if (!p)
+ return;
+ AuDebugOn(au_kfree_sz_test(p));
+ kfree(p);
+}
+
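+/*
+ * Editorial example (not part of the original patch): choosing the matching
+ * helper for the three cases described above.
+ */
+#if 0 /* illustration only, never compiled */
+struct example_big {
+	unsigned long a[8];	/* always larger than struct rcu_head */
+};
+
+static void example_kfree(struct example_big *big, char *buf)
+{
+	au_kfree_rcu(big);	/* case 1: object is always large enough */
+	au_kfree_try_rcu(buf);	/* case 3: size is decided at run time */
+}
+#endif
+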
+static inline int au_kmidx_sub(size_t sz, size_t new_sz)
+{
+#ifndef CONFIG_SLOB
+ return kmalloc_index(sz) - kmalloc_index(new_sz);
+#else
+ return -1; /* SLOB is untested */
+#endif
+}
+
+int au_seq_path(struct seq_file *seq, struct path *path);
+
+#ifdef CONFIG_PROC_FS
+/* procfs.c */
+int __init au_procfs_init(void);
+void au_procfs_fin(void);
+#else
+AuStubInt0(au_procfs_init, void);
+AuStubVoid(au_procfs_fin, void);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* kmem cache */
+enum {
+ AuCache_DINFO,
+ AuCache_ICNTNR,
+ AuCache_FINFO,
+ AuCache_VDIR,
+ AuCache_DEHSTR,
+ AuCache_HNOTIFY, /* must be last */
+ AuCache_Last
+};
+
+extern struct kmem_cache *au_cache[AuCache_Last];
+
+#define AuCacheFlags (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD)
+#define AuCache(type) KMEM_CACHE(type, AuCacheFlags)
+#define AuCacheCtor(type, ctor) \
+ kmem_cache_create(#type, sizeof(struct type), \
+ __alignof__(struct type), AuCacheFlags, ctor)
+
+#define AuCacheFuncs(name, index) \
+ static inline struct au_##name *au_cache_alloc_##name(void) \
+ { return kmem_cache_alloc(au_cache[AuCache_##index], GFP_NOFS); } \
+ static inline void au_cache_free_##name##_norcu(struct au_##name *p) \
+ { kmem_cache_free(au_cache[AuCache_##index], p); } \
+ \
+ static inline void au_cache_free_##name##_rcu_cb(struct rcu_head *rcu) \
+ { void *p = rcu; \
+ p -= offsetof(struct au_##name, rcu); \
+ kmem_cache_free(au_cache[AuCache_##index], p); } \
+ static inline void au_cache_free_##name##_rcu(struct au_##name *p) \
+ { BUILD_BUG_ON(sizeof(struct au_##name) < sizeof(struct rcu_head)); \
+ call_rcu(&p->rcu, au_cache_free_##name##_rcu_cb); } \
+ \
+ static inline void au_cache_free_##name(struct au_##name *p) \
+ { /* au_cache_free_##name##_norcu(p); */ \
+ au_cache_free_##name##_rcu(p); }
+
+AuCacheFuncs(dinfo, DINFO);
+AuCacheFuncs(icntnr, ICNTNR);
+AuCacheFuncs(finfo, FINFO);
+AuCacheFuncs(vdir, VDIR);
+AuCacheFuncs(vdir_dehstr, DEHSTR);
+#ifdef CONFIG_AUFS_HNOTIFY
+AuCacheFuncs(hnotify, HNOTIFY);
+#endif
+
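+/*
+ * Editorial example (not part of the original patch): AuCacheFuncs(dinfo,
+ * DINFO) above generates au_cache_alloc_dinfo() and au_cache_free_dinfo().
+ */
+#if 0 /* illustration only, never compiled */
+static int example_cache(void)
+{
+	struct au_dinfo *dinfo;
+
+	dinfo = au_cache_alloc_dinfo();
+	if (!dinfo)
+		return -ENOMEM;
+	/* ... initialize and use dinfo ... */
+	au_cache_free_dinfo(dinfo);	/* freed via call_rcu() */
+	return 0;
+}
+#endif
+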
+#endif /* __KERNEL__ */
+#endif /* __AUFS_MODULE_H__ */
diff --git a/fs/aufs/mvdown.c b/fs/aufs/mvdown.c
new file mode 100644
index 000000000000..0b065e6fcfe9
--- /dev/null
+++ b/fs/aufs/mvdown.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * move-down, opposite of copy-up
+ */
+
+#include "aufs.h"
+
+struct au_mvd_args {
+ struct {
+ struct super_block *h_sb;
+ struct dentry *h_parent;
+ struct au_hinode *hdir;
+ struct inode *h_dir, *h_inode;
+ struct au_pin pin;
+ } info[AUFS_MVDOWN_NARRAY];
+
+ struct aufs_mvdown mvdown;
+ struct dentry *dentry, *parent;
+ struct inode *inode, *dir;
+ struct super_block *sb;
+ aufs_bindex_t bopq, bwh, bfound;
+ unsigned char rename_lock;
+};
+
+#define mvd_errno mvdown.au_errno
+#define mvd_bsrc mvdown.stbr[AUFS_MVDOWN_UPPER].bindex
+#define mvd_src_brid mvdown.stbr[AUFS_MVDOWN_UPPER].brid
+#define mvd_bdst mvdown.stbr[AUFS_MVDOWN_LOWER].bindex
+#define mvd_dst_brid mvdown.stbr[AUFS_MVDOWN_LOWER].brid
+
+#define mvd_h_src_sb info[AUFS_MVDOWN_UPPER].h_sb
+#define mvd_h_src_parent info[AUFS_MVDOWN_UPPER].h_parent
+#define mvd_hdir_src info[AUFS_MVDOWN_UPPER].hdir
+#define mvd_h_src_dir info[AUFS_MVDOWN_UPPER].h_dir
+#define mvd_h_src_inode info[AUFS_MVDOWN_UPPER].h_inode
+#define mvd_pin_src info[AUFS_MVDOWN_UPPER].pin
+
+#define mvd_h_dst_sb info[AUFS_MVDOWN_LOWER].h_sb
+#define mvd_h_dst_parent info[AUFS_MVDOWN_LOWER].h_parent
+#define mvd_hdir_dst info[AUFS_MVDOWN_LOWER].hdir
+#define mvd_h_dst_dir info[AUFS_MVDOWN_LOWER].h_dir
+#define mvd_h_dst_inode info[AUFS_MVDOWN_LOWER].h_inode
+#define mvd_pin_dst info[AUFS_MVDOWN_LOWER].pin
+
+#define AU_MVD_PR(flag, ...) do { \
+ if (flag) \
+ pr_err(__VA_ARGS__); \
+ } while (0)
+
+static int find_lower_writable(struct au_mvd_args *a)
+{
+ struct super_block *sb;
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+
+ sb = a->sb;
+ bindex = a->mvd_bsrc;
+ bbot = au_sbbot(sb);
+ if (a->mvdown.flags & AUFS_MVDOWN_FHSM_LOWER)
+ for (bindex++; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm)
+ && !sb_rdonly(au_br_sb(br)))
+ return bindex;
+ }
+ else if (!(a->mvdown.flags & AUFS_MVDOWN_ROLOWER))
+ for (bindex++; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!au_br_rdonly(br))
+ return bindex;
+ }
+ else
+ for (bindex++; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!sb_rdonly(au_br_sb(br))) {
+ if (au_br_rdonly(br))
+ a->mvdown.flags
+ |= AUFS_MVDOWN_ROLOWER_R;
+ return bindex;
+ }
+ }
+
+ return -1;
+}
+
+/* make the parent dir on bdst */
+static int au_do_mkdir(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+
+ err = 0;
+ a->mvd_hdir_src = au_hi(a->dir, a->mvd_bsrc);
+ a->mvd_hdir_dst = au_hi(a->dir, a->mvd_bdst);
+ a->mvd_h_src_parent = au_h_dptr(a->parent, a->mvd_bsrc);
+ a->mvd_h_dst_parent = NULL;
+ if (au_dbbot(a->parent) >= a->mvd_bdst)
+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+ if (!a->mvd_h_dst_parent) {
+ err = au_cpdown_dirs(a->dentry, a->mvd_bdst);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "cpdown_dirs failed\n");
+ goto out;
+ }
+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* lock them all */
+static int au_do_lock(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct dentry *h_trap;
+
+ a->mvd_h_src_sb = au_sbr_sb(a->sb, a->mvd_bsrc);
+ a->mvd_h_dst_sb = au_sbr_sb(a->sb, a->mvd_bdst);
+ err = au_pin(&a->mvd_pin_dst, a->dentry, a->mvd_bdst,
+ au_opt_udba(a->sb),
+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+ AuTraceErr(err);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "pin_dst failed\n");
+ goto out;
+ }
+
+ if (a->mvd_h_src_sb != a->mvd_h_dst_sb) {
+ a->rename_lock = 0;
+ au_pin_init(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
+ AuLsc_DI_PARENT, AuLsc_I_PARENT3,
+ au_opt_udba(a->sb),
+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+ err = au_do_pin(&a->mvd_pin_src);
+ AuTraceErr(err);
+ a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "pin_src failed\n");
+ goto out_dst;
+ }
+ goto out; /* success */
+ }
+
+ a->rename_lock = 1;
+ au_pin_hdir_unlock(&a->mvd_pin_dst);
+ err = au_pin(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
+ au_opt_udba(a->sb),
+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+ AuTraceErr(err);
+ a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "pin_src failed\n");
+ au_pin_hdir_lock(&a->mvd_pin_dst);
+ goto out_dst;
+ }
+ au_pin_hdir_unlock(&a->mvd_pin_src);
+ h_trap = vfsub_lock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
+ if (h_trap) {
+ err = (h_trap != a->mvd_h_src_parent);
+ if (err)
+ err = (h_trap != a->mvd_h_dst_parent);
+ }
+ BUG_ON(err); /* it should never happen */
+ if (unlikely(a->mvd_h_src_dir != au_pinned_h_dir(&a->mvd_pin_src))) {
+ err = -EBUSY;
+ AuTraceErr(err);
+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
+ au_pin_hdir_lock(&a->mvd_pin_src);
+ au_unpin(&a->mvd_pin_src);
+ au_pin_hdir_lock(&a->mvd_pin_dst);
+ goto out_dst;
+ }
+ goto out; /* success */
+
+out_dst:
+ au_unpin(&a->mvd_pin_dst);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static void au_do_unlock(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ if (!a->rename_lock)
+ au_unpin(&a->mvd_pin_src);
+ else {
+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
+ au_pin_hdir_lock(&a->mvd_pin_src);
+ au_unpin(&a->mvd_pin_src);
+ au_pin_hdir_lock(&a->mvd_pin_dst);
+ }
+ au_unpin(&a->mvd_pin_dst);
+}
+
+/* copy-down the file */
+static int au_do_cpdown(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct au_cp_generic cpg = {
+ .dentry = a->dentry,
+ .bdst = a->mvd_bdst,
+ .bsrc = a->mvd_bsrc,
+ .len = -1,
+ .pin = &a->mvd_pin_dst,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+
+ AuDbg("b%d, b%d\n", cpg.bsrc, cpg.bdst);
+ if (a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
+ au_fset_cpup(cpg.flags, OVERWRITE);
+ if (a->mvdown.flags & AUFS_MVDOWN_ROLOWER)
+ au_fset_cpup(cpg.flags, RWDST);
+ err = au_sio_cpdown_simple(&cpg);
+ if (unlikely(err))
+ AU_MVD_PR(dmsg, "cpdown failed\n");
+
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * unlink the whiteout on bdst, if it exists; it may have been created by
+ * UDBA while we were sleeping
+ */
+static int au_do_unlink_wh(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct path h_path;
+ struct au_branch *br;
+ struct inode *delegated;
+
+ br = au_sbr(a->sb, a->mvd_bdst);
+ h_path.dentry = au_wh_lkup(a->mvd_h_dst_parent, &a->dentry->d_name, br);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry)) {
+ AU_MVD_PR(dmsg, "wh_lkup failed\n");
+ goto out;
+ }
+
+ err = 0;
+ if (d_is_positive(h_path.dentry)) {
+ h_path.mnt = au_br_mnt(br);
+ delegated = NULL;
+ err = vfsub_unlink(d_inode(a->mvd_h_dst_parent), &h_path,
+ &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err))
+ AU_MVD_PR(dmsg, "wh_unlink failed\n");
+ }
+ dput(h_path.dentry);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * unlink the topmost h_dentry
+ */
+static int au_do_unlink(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct path h_path;
+ struct inode *delegated;
+
+ h_path.mnt = au_sbr_mnt(a->sb, a->mvd_bsrc);
+ h_path.dentry = au_h_dptr(a->dentry, a->mvd_bsrc);
+ delegated = NULL;
+ err = vfsub_unlink(a->mvd_h_src_dir, &h_path, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err))
+ AU_MVD_PR(dmsg, "unlink failed\n");
+
+ AuTraceErr(err);
+ return err;
+}
+
+/* Since mvdown succeeded, we ignore any error from this function */
+static void au_do_stfs(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct au_branch *br;
+
+ a->mvdown.flags |= AUFS_MVDOWN_STFS_FAILED;
+ br = au_sbr(a->sb, a->mvd_bsrc);
+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_UPPER].stfs);
+ if (!err) {
+ br = au_sbr(a->sb, a->mvd_bdst);
+ a->mvdown.stbr[AUFS_MVDOWN_LOWER].brid = br->br_id;
+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_LOWER].stfs);
+ }
+ if (!err)
+ a->mvdown.flags &= ~AUFS_MVDOWN_STFS_FAILED;
+ else
+ AU_MVD_PR(dmsg, "statfs failed (%d), ignored\n", err);
+}
+
+/*
+ * copy-down the file and unlink the bsrc file.
+ * - unlink the bdst whiteout if it exists
+ * - copy-down the file (with whtmp name and rename)
+ * - unlink the bsrc file
+ */
+static int au_do_mvdown(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+
+ err = au_do_mkdir(dmsg, a);
+ if (!err)
+ err = au_do_lock(dmsg, a);
+ if (unlikely(err))
+ goto out;
+
+ /*
+ * do not revert the activities we made on bdst since they should be
+ * harmless in aufs.
+ */
+
+ err = au_do_cpdown(dmsg, a);
+ if (!err)
+ err = au_do_unlink_wh(dmsg, a);
+ if (!err && !(a->mvdown.flags & AUFS_MVDOWN_KUPPER))
+ err = au_do_unlink(dmsg, a);
+ if (unlikely(err))
+ goto out_unlock;
+
+ AuDbg("%pd2, 0x%x, %d --> %d\n",
+ a->dentry, a->mvdown.flags, a->mvd_bsrc, a->mvd_bdst);
+ if (find_lower_writable(a) < 0)
+ a->mvdown.flags |= AUFS_MVDOWN_BOTTOM;
+
+ if (a->mvdown.flags & AUFS_MVDOWN_STFS)
+ au_do_stfs(dmsg, a);
+
+ /* maintain internal array */
+ if (!(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) {
+ au_set_h_dptr(a->dentry, a->mvd_bsrc, NULL);
+ au_set_dbtop(a->dentry, a->mvd_bdst);
+ au_set_h_iptr(a->inode, a->mvd_bsrc, NULL, /*flags*/0);
+ au_set_ibtop(a->inode, a->mvd_bdst);
+ } else {
+ /* hide the lower */
+ au_set_h_dptr(a->dentry, a->mvd_bdst, NULL);
+ au_set_dbbot(a->dentry, a->mvd_bsrc);
+ au_set_h_iptr(a->inode, a->mvd_bdst, NULL, /*flags*/0);
+ au_set_ibbot(a->inode, a->mvd_bsrc);
+ }
+ if (au_dbbot(a->dentry) < a->mvd_bdst)
+ au_set_dbbot(a->dentry, a->mvd_bdst);
+ if (au_ibbot(a->inode) < a->mvd_bdst)
+ au_set_ibbot(a->inode, a->mvd_bdst);
+
+out_unlock:
+ au_do_unlock(dmsg, a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* make sure the file is idle */
+static int au_mvd_args_busy(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err, plinked;
+
+ err = 0;
+ plinked = !!au_opt_test(au_mntflags(a->sb), PLINK);
+ if (au_dbtop(a->dentry) == a->mvd_bsrc
+ && au_dcount(a->dentry) == 1
+ && atomic_read(&a->inode->i_count) == 1
+ /* && a->mvd_h_src_inode->i_nlink == 1 */
+ && (!plinked || !au_plink_test(a->inode))
+ && a->inode->i_nlink == 1)
+ goto out;
+
+ err = -EBUSY;
+ AU_MVD_PR(dmsg,
+ "b%d, d{b%d, c%d?}, i{c%d?, l%u}, hi{l%u}, p{%d, %d}\n",
+ a->mvd_bsrc, au_dbtop(a->dentry), au_dcount(a->dentry),
+ atomic_read(&a->inode->i_count), a->inode->i_nlink,
+ a->mvd_h_src_inode->i_nlink,
+ plinked, plinked ? au_plink_test(a->inode) : 0);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* make sure the parent dir is fine */
+static int au_mvd_args_parent(const unsigned char dmsg,
+ struct au_mvd_args *a)
+{
+ int err;
+ aufs_bindex_t bindex;
+
+ err = 0;
+ if (unlikely(au_alive_dir(a->parent))) {
+ err = -ENOENT;
+ AU_MVD_PR(dmsg, "parent dir is dead\n");
+ goto out;
+ }
+
+ a->bopq = au_dbdiropq(a->parent);
+ bindex = au_wbr_nonopq(a->dentry, a->mvd_bdst);
+ AuDbg("b%d\n", bindex);
+ if (unlikely((bindex >= 0 && bindex < a->mvd_bdst)
+ || (a->bopq != -1 && a->bopq < a->mvd_bdst))) {
+ err = -EINVAL;
+ a->mvd_errno = EAU_MVDOWN_OPAQUE;
+ AU_MVD_PR(dmsg, "ancestor is opaque b%d, b%d\n",
+ a->bopq, a->mvd_bdst);
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_mvd_args_intermediate(const unsigned char dmsg,
+ struct au_mvd_args *a)
+{
+ int err;
+ struct au_dinfo *dinfo, *tmp;
+
+ /* lookup the next lower positive entry */
+ err = -ENOMEM;
+ tmp = au_di_alloc(a->sb, AuLsc_DI_TMP);
+ if (unlikely(!tmp))
+ goto out;
+
+ a->bfound = -1;
+ a->bwh = -1;
+ dinfo = au_di(a->dentry);
+ au_di_cp(tmp, dinfo);
+ au_di_swap(tmp, dinfo);
+
+ /* returns the number of positive dentries */
+ err = au_lkup_dentry(a->dentry, a->mvd_bsrc + 1,
+ /* AuLkup_IGNORE_PERM */ 0);
+ if (!err)
+ a->bwh = au_dbwh(a->dentry);
+ else if (err > 0)
+ a->bfound = au_dbtop(a->dentry);
+
+ au_di_swap(tmp, dinfo);
+ au_rw_write_unlock(&tmp->di_rwsem);
+ au_di_free(tmp);
+ if (unlikely(err < 0))
+ AU_MVD_PR(dmsg, "failed look-up lower\n");
+
+ /*
+ * here, we have these cases.
+ * bfound == -1
+ * no positive dentry under bsrc. there are more sub-cases.
+ * bwh < 0
+	 *	  there is no whiteout; the move-down can proceed safely.
+ * bwh <= bsrc
+ * impossible
+ * bsrc < bwh && bwh < bdst
+ * there is a whiteout on RO branch. cannot proceed.
+ * bwh == bdst
+ * there is a whiteout on the RW target branch. it should
+ * be removed.
+ * bdst < bwh
+	 *	there is a whiteout on some unrelated branch.
+ * -1 < bfound && bfound <= bsrc
+ * impossible.
+ * bfound < bdst
+ * found, but it is on RO branch between bsrc and bdst. cannot
+ * proceed.
+ * bfound == bdst
+ * found, replace it if AUFS_MVDOWN_FORCE is set. otherwise return
+ * error.
+ * bdst < bfound
+ * found, after we create the file on bdst, it will be hidden.
+ */
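+	/*
+	 * concrete example (editorial addition, restating the cases above):
+	 * with bsrc == 0 (RW), b1 read-only and bdst == 2 (RW target), a
+	 * whiteout found on b1 (bsrc < bwh < bdst) aborts the move-down,
+	 * while a whiteout on b2 (bwh == bdst) is simply removed before
+	 * proceeding.
+	 */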
+
+ AuDebugOn(a->bfound == -1
+ && a->bwh != -1
+ && a->bwh <= a->mvd_bsrc);
+ AuDebugOn(-1 < a->bfound
+ && a->bfound <= a->mvd_bsrc);
+
+ err = -EINVAL;
+ if (a->bfound == -1
+ && a->mvd_bsrc < a->bwh
+ && a->bwh != -1
+ && a->bwh < a->mvd_bdst) {
+ a->mvd_errno = EAU_MVDOWN_WHITEOUT;
+ AU_MVD_PR(dmsg, "bsrc %d, bdst %d, bfound %d, bwh %d\n",
+ a->mvd_bsrc, a->mvd_bdst, a->bfound, a->bwh);
+ goto out;
+ } else if (a->bfound != -1 && a->bfound < a->mvd_bdst) {
+ a->mvd_errno = EAU_MVDOWN_UPPER;
+ AU_MVD_PR(dmsg, "bdst %d, bfound %d\n",
+ a->mvd_bdst, a->bfound);
+ goto out;
+ }
+
+ err = 0; /* success */
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_mvd_args_exist(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+
+ err = 0;
+ if (!(a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
+ && a->bfound == a->mvd_bdst)
+ err = -EEXIST;
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_mvd_args(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct au_branch *br;
+
+ err = -EISDIR;
+ if (unlikely(S_ISDIR(a->inode->i_mode)))
+ goto out;
+
+ err = -EINVAL;
+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_UPPER))
+ a->mvd_bsrc = au_ibtop(a->inode);
+ else {
+ a->mvd_bsrc = au_br_index(a->sb, a->mvd_src_brid);
+ if (unlikely(a->mvd_bsrc < 0
+ || (a->mvd_bsrc < au_dbtop(a->dentry)
+ || au_dbbot(a->dentry) < a->mvd_bsrc
+ || !au_h_dptr(a->dentry, a->mvd_bsrc))
+ || (a->mvd_bsrc < au_ibtop(a->inode)
+ || au_ibbot(a->inode) < a->mvd_bsrc
+ || !au_h_iptr(a->inode, a->mvd_bsrc)))) {
+ a->mvd_errno = EAU_MVDOWN_NOUPPER;
+ AU_MVD_PR(dmsg, "no upper\n");
+ goto out;
+ }
+ }
+ if (unlikely(a->mvd_bsrc == au_sbbot(a->sb))) {
+ a->mvd_errno = EAU_MVDOWN_BOTTOM;
+ AU_MVD_PR(dmsg, "on the bottom\n");
+ goto out;
+ }
+ a->mvd_h_src_inode = au_h_iptr(a->inode, a->mvd_bsrc);
+ br = au_sbr(a->sb, a->mvd_bsrc);
+ err = au_br_rdonly(br);
+ if (!(a->mvdown.flags & AUFS_MVDOWN_ROUPPER)) {
+ if (unlikely(err))
+ goto out;
+ } else if (!(vfsub_native_ro(a->mvd_h_src_inode)
+ || IS_APPEND(a->mvd_h_src_inode))) {
+ if (err)
+ a->mvdown.flags |= AUFS_MVDOWN_ROUPPER_R;
+ /* go on */
+ } else
+ goto out;
+
+ err = -EINVAL;
+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_LOWER)) {
+ a->mvd_bdst = find_lower_writable(a);
+ if (unlikely(a->mvd_bdst < 0)) {
+ a->mvd_errno = EAU_MVDOWN_BOTTOM;
+ AU_MVD_PR(dmsg, "no writable lower branch\n");
+ goto out;
+ }
+ } else {
+ a->mvd_bdst = au_br_index(a->sb, a->mvd_dst_brid);
+ if (unlikely(a->mvd_bdst < 0
+ || au_sbbot(a->sb) < a->mvd_bdst)) {
+ a->mvd_errno = EAU_MVDOWN_NOLOWERBR;
+ AU_MVD_PR(dmsg, "no lower brid\n");
+ goto out;
+ }
+ }
+
+ err = au_mvd_args_busy(dmsg, a);
+ if (!err)
+ err = au_mvd_args_parent(dmsg, a);
+ if (!err)
+ err = au_mvd_args_intermediate(dmsg, a);
+ if (!err)
+ err = au_mvd_args_exist(dmsg, a);
+ if (!err)
+ AuDbg("b%d, b%d\n", a->mvd_bsrc, a->mvd_bdst);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *uarg)
+{
+ int err, e;
+ unsigned char dmsg;
+ struct au_mvd_args *args;
+ struct inode *inode;
+
+ inode = d_inode(dentry);
+ err = -EPERM;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = -ENOMEM;
+ args = kmalloc(sizeof(*args), GFP_NOFS);
+ if (unlikely(!args))
+ goto out;
+
+ err = copy_from_user(&args->mvdown, uarg, sizeof(args->mvdown));
+ if (!err)
+ err = !access_ok(uarg, sizeof(*uarg));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out_free;
+ }
+ AuDbg("flags 0x%x\n", args->mvdown.flags);
+ args->mvdown.flags &= ~(AUFS_MVDOWN_ROLOWER_R | AUFS_MVDOWN_ROUPPER_R);
+ args->mvdown.au_errno = 0;
+ args->dentry = dentry;
+ args->inode = inode;
+ args->sb = dentry->d_sb;
+
+ err = -ENOENT;
+ dmsg = !!(args->mvdown.flags & AUFS_MVDOWN_DMSG);
+ args->parent = dget_parent(dentry);
+ args->dir = d_inode(args->parent);
+ inode_lock_nested(args->dir, I_MUTEX_PARENT);
+ dput(args->parent);
+ if (unlikely(args->parent != dentry->d_parent)) {
+ AU_MVD_PR(dmsg, "parent dir is moved\n");
+ goto out_dir;
+ }
+
+ inode_lock_nested(inode, I_MUTEX_CHILD);
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_NOPLMW);
+ if (unlikely(err))
+ goto out_inode;
+
+ di_write_lock_parent(args->parent);
+ err = au_mvd_args(dmsg, args);
+ if (unlikely(err))
+ goto out_parent;
+
+ err = au_do_mvdown(dmsg, args);
+ if (unlikely(err))
+ goto out_parent;
+
+ au_cpup_attr_timesizes(args->dir);
+ au_cpup_attr_timesizes(inode);
+ if (!(args->mvdown.flags & AUFS_MVDOWN_KUPPER))
+ au_cpup_igen(inode, au_h_iptr(inode, args->mvd_bdst));
+ /* au_digen_dec(dentry); */
+
+out_parent:
+ di_write_unlock(args->parent);
+ aufs_read_unlock(dentry, AuLock_DW);
+out_inode:
+ inode_unlock(inode);
+out_dir:
+ inode_unlock(args->dir);
+out_free:
+ e = copy_to_user(uarg, &args->mvdown, sizeof(args->mvdown));
+ if (unlikely(e))
+ err = -EFAULT;
+ au_kfree_rcu(args);
+out:
+ AuTraceErr(err);
+ return err;
+}
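+
+/*
+ * Locking/return summary for au_mvdown() above: the parent directory's
+ * i_mutex is taken first, then the target inode's i_mutex, then the aufs
+ * superblock/dentry locks; the result (including mvdown.au_errno) is
+ * copied back to userspace even when the operation itself fails.
+ */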
diff --git a/fs/aufs/opts.c b/fs/aufs/opts.c
new file mode 100644
index 000000000000..1cd07cd30320
--- /dev/null
+++ b/fs/aufs/opts.c
@@ -0,0 +1,1877 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * mount options/flags
+ */
+
+#include <linux/namei.h>
+#include <linux/types.h> /* required by a distribution */
+#include <linux/parser.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+enum {
+ Opt_br,
+ Opt_add, Opt_del, Opt_mod, Opt_append, Opt_prepend,
+ Opt_idel, Opt_imod,
+ Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash,
+ Opt_rdblk_def, Opt_rdhash_def,
+ Opt_xino, Opt_noxino,
+ Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino,
+ Opt_trunc_xino_path, Opt_itrunc_xino,
+ Opt_trunc_xib, Opt_notrunc_xib,
+ Opt_shwh, Opt_noshwh,
+ Opt_plink, Opt_noplink, Opt_list_plink,
+ Opt_udba,
+ Opt_dio, Opt_nodio,
+ Opt_diropq_a, Opt_diropq_w,
+ Opt_warn_perm, Opt_nowarn_perm,
+ Opt_wbr_copyup, Opt_wbr_create,
+ Opt_fhsm_sec,
+ Opt_verbose, Opt_noverbose,
+ Opt_sum, Opt_nosum, Opt_wsum,
+ Opt_dirperm1, Opt_nodirperm1,
+ Opt_dirren, Opt_nodirren,
+ Opt_acl, Opt_noacl,
+ Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err
+};
+
+static match_table_t options = {
+ {Opt_br, "br=%s"},
+ {Opt_br, "br:%s"},
+
+ {Opt_add, "add=%d:%s"},
+ {Opt_add, "add:%d:%s"},
+ {Opt_add, "ins=%d:%s"},
+ {Opt_add, "ins:%d:%s"},
+ {Opt_append, "append=%s"},
+ {Opt_append, "append:%s"},
+ {Opt_prepend, "prepend=%s"},
+ {Opt_prepend, "prepend:%s"},
+
+ {Opt_del, "del=%s"},
+ {Opt_del, "del:%s"},
+ /* {Opt_idel, "idel:%d"}, */
+ {Opt_mod, "mod=%s"},
+ {Opt_mod, "mod:%s"},
+ /* {Opt_imod, "imod:%d:%s"}, */
+
+ {Opt_dirwh, "dirwh=%d"},
+
+ {Opt_xino, "xino=%s"},
+ {Opt_noxino, "noxino"},
+ {Opt_trunc_xino, "trunc_xino"},
+ {Opt_trunc_xino_v, "trunc_xino_v=%d:%d"},
+ {Opt_notrunc_xino, "notrunc_xino"},
+ {Opt_trunc_xino_path, "trunc_xino=%s"},
+ {Opt_itrunc_xino, "itrunc_xino=%d"},
+ /* {Opt_zxino, "zxino=%s"}, */
+ {Opt_trunc_xib, "trunc_xib"},
+ {Opt_notrunc_xib, "notrunc_xib"},
+
+#ifdef CONFIG_PROC_FS
+ {Opt_plink, "plink"},
+#else
+ {Opt_ignore_silent, "plink"},
+#endif
+
+ {Opt_noplink, "noplink"},
+
+#ifdef CONFIG_AUFS_DEBUG
+ {Opt_list_plink, "list_plink"},
+#endif
+
+ {Opt_udba, "udba=%s"},
+
+ {Opt_dio, "dio"},
+ {Opt_nodio, "nodio"},
+
+#ifdef CONFIG_AUFS_DIRREN
+ {Opt_dirren, "dirren"},
+ {Opt_nodirren, "nodirren"},
+#else
+ {Opt_ignore, "dirren"},
+ {Opt_ignore_silent, "nodirren"},
+#endif
+
+#ifdef CONFIG_AUFS_FHSM
+ {Opt_fhsm_sec, "fhsm_sec=%d"},
+#else
+ {Opt_ignore, "fhsm_sec=%d"},
+#endif
+
+ {Opt_diropq_a, "diropq=always"},
+ {Opt_diropq_a, "diropq=a"},
+ {Opt_diropq_w, "diropq=whiteouted"},
+ {Opt_diropq_w, "diropq=w"},
+
+ {Opt_warn_perm, "warn_perm"},
+ {Opt_nowarn_perm, "nowarn_perm"},
+
+ /* keep them temporary */
+ {Opt_ignore_silent, "nodlgt"},
+ {Opt_ignore, "clean_plink"},
+
+#ifdef CONFIG_AUFS_SHWH
+ {Opt_shwh, "shwh"},
+#endif
+ {Opt_noshwh, "noshwh"},
+
+ {Opt_dirperm1, "dirperm1"},
+ {Opt_nodirperm1, "nodirperm1"},
+
+ {Opt_verbose, "verbose"},
+ {Opt_verbose, "v"},
+ {Opt_noverbose, "noverbose"},
+ {Opt_noverbose, "quiet"},
+ {Opt_noverbose, "q"},
+ {Opt_noverbose, "silent"},
+
+ {Opt_sum, "sum"},
+ {Opt_nosum, "nosum"},
+ {Opt_wsum, "wsum"},
+
+ {Opt_rdcache, "rdcache=%d"},
+ {Opt_rdblk, "rdblk=%d"},
+ {Opt_rdblk_def, "rdblk=def"},
+ {Opt_rdhash, "rdhash=%d"},
+ {Opt_rdhash_def, "rdhash=def"},
+
+ {Opt_wbr_create, "create=%s"},
+ {Opt_wbr_create, "create_policy=%s"},
+ {Opt_wbr_copyup, "cpup=%s"},
+ {Opt_wbr_copyup, "copyup=%s"},
+ {Opt_wbr_copyup, "copyup_policy=%s"},
+
+ /* generic VFS flag */
+#ifdef CONFIG_FS_POSIX_ACL
+ {Opt_acl, "acl"},
+ {Opt_noacl, "noacl"},
+#else
+ {Opt_ignore, "acl"},
+ {Opt_ignore_silent, "noacl"},
+#endif
+
+ /* internal use for the scripts */
+ {Opt_ignore_silent, "si=%s"},
+
+ {Opt_br, "dirs=%s"},
+ {Opt_ignore, "debug=%d"},
+ {Opt_ignore, "delete=whiteout"},
+ {Opt_ignore, "delete=all"},
+ {Opt_ignore, "imap=%s"},
+
+ /* temporary workaround, due to old mount(8)? */
+ {Opt_ignore_silent, "relatime"},
+
+ {Opt_err, NULL}
+};
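+
+/*
+ * Example (paths are illustrative only): a mount such as
+ *	mount -t aufs -o br=/rw=rw:/ro=ro+wh,udba=reval,plink none /mnt
+ * is matched against this table by au_opts_parse() below; "br=%s" hits
+ * Opt_br, "udba=%s" hits Opt_udba and "plink" hits Opt_plink.
+ */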
+
+/* ---------------------------------------------------------------------- */
+
+static const char *au_parser_pattern(int val, match_table_t tbl)
+{
+ struct match_token *p;
+
+ p = tbl;
+ while (p->pattern) {
+ if (p->token == val)
+ return p->pattern;
+ p++;
+ }
+ BUG();
+ return "??";
+}
+
+static const char *au_optstr(int *val, match_table_t tbl)
+{
+ struct match_token *p;
+ int v;
+
+ v = *val;
+ if (!v)
+ goto out;
+ p = tbl;
+ while (p->pattern) {
+ if (p->token
+ && (v & p->token) == p->token) {
+ *val &= ~p->token;
+ return p->pattern;
+ }
+ p++;
+ }
+
+out:
+ return NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t brperm = {
+ {AuBrPerm_RO, AUFS_BRPERM_RO},
+ {AuBrPerm_RR, AUFS_BRPERM_RR},
+ {AuBrPerm_RW, AUFS_BRPERM_RW},
+ {0, NULL}
+};
+
+static match_table_t brattr = {
+ /* general */
+ {AuBrAttr_COO_REG, AUFS_BRATTR_COO_REG},
+ {AuBrAttr_COO_ALL, AUFS_BRATTR_COO_ALL},
+ /* 'unpin' attrib is meaningless since linux-3.18-rc1 */
+ {AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN},
+#ifdef CONFIG_AUFS_FHSM
+ {AuBrAttr_FHSM, AUFS_BRATTR_FHSM},
+#endif
+#ifdef CONFIG_AUFS_XATTR
+ {AuBrAttr_ICEX, AUFS_BRATTR_ICEX},
+ {AuBrAttr_ICEX_SEC, AUFS_BRATTR_ICEX_SEC},
+ {AuBrAttr_ICEX_SYS, AUFS_BRATTR_ICEX_SYS},
+ {AuBrAttr_ICEX_TR, AUFS_BRATTR_ICEX_TR},
+ {AuBrAttr_ICEX_USR, AUFS_BRATTR_ICEX_USR},
+ {AuBrAttr_ICEX_OTH, AUFS_BRATTR_ICEX_OTH},
+#endif
+
+ /* ro/rr branch */
+ {AuBrRAttr_WH, AUFS_BRRATTR_WH},
+
+ /* rw branch */
+ {AuBrWAttr_MOO, AUFS_BRWATTR_MOO},
+ {AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH},
+
+ {0, NULL}
+};
+
+static int br_attr_val(char *str, match_table_t table, substring_t args[])
+{
+ int attr, v;
+ char *p;
+
+ attr = 0;
+ do {
+ p = strchr(str, '+');
+ if (p)
+ *p = 0;
+ v = match_token(str, table, args);
+ if (v) {
+ if (v & AuBrAttr_CMOO_Mask)
+ attr &= ~AuBrAttr_CMOO_Mask;
+ attr |= v;
+ } else {
+ if (p)
+ *p = '+';
+ pr_warn("ignored branch attribute %s\n", str);
+ break;
+ }
+ if (p)
+ str = p + 1;
+ } while (p);
+
+ return attr;
+}
+
+static int au_do_optstr_br_attr(au_br_perm_str_t *str, int perm)
+{
+ int sz;
+ const char *p;
+ char *q;
+
+ q = str->a;
+ *q = 0;
+ p = au_optstr(&perm, brattr);
+ if (p) {
+ sz = strlen(p);
+ memcpy(q, p, sz + 1);
+ q += sz;
+ } else
+ goto out;
+
+ do {
+ p = au_optstr(&perm, brattr);
+ if (p) {
+ *q++ = '+';
+ sz = strlen(p);
+ memcpy(q, p, sz + 1);
+ q += sz;
+ }
+ } while (p);
+
+out:
+ return q - str->a;
+}
+
+static int noinline_for_stack br_perm_val(char *perm)
+{
+ int val, bad, sz;
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ au_br_perm_str_t attr;
+
+ p = strchr(perm, '+');
+ if (p)
+ *p = 0;
+ val = match_token(perm, brperm, args);
+ if (!val) {
+ if (p)
+ *p = '+';
+ pr_warn("ignored branch permission %s\n", perm);
+ val = AuBrPerm_RO;
+ goto out;
+ }
+ if (!p)
+ goto out;
+
+ val |= br_attr_val(p + 1, brattr, args);
+
+ bad = 0;
+ switch (val & AuBrPerm_Mask) {
+ case AuBrPerm_RO:
+ case AuBrPerm_RR:
+ bad = val & AuBrWAttr_Mask;
+ val &= ~AuBrWAttr_Mask;
+ break;
+ case AuBrPerm_RW:
+ bad = val & AuBrRAttr_Mask;
+ val &= ~AuBrRAttr_Mask;
+ break;
+ }
+
+ /*
+	 * the 'unpin' attrib has been meaningless since linux-3.18-rc1, but
+	 * aufs does not treat it as an error, just a warning.
+ * this is a tiny guard for the user operation.
+ */
+ if (val & AuBrAttr_UNPIN) {
+ bad |= AuBrAttr_UNPIN;
+ val &= ~AuBrAttr_UNPIN;
+ }
+
+ if (unlikely(bad)) {
+ sz = au_do_optstr_br_attr(&attr, bad);
+ AuDebugOn(!sz);
+ pr_warn("ignored branch attribute %s\n", attr.a);
+ }
+
+out:
+ return val;
+}
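+
+/*
+ * Example branch-permission strings parsed above (illustrative): a base
+ * permission optionally followed by '+'-separated attributes, e.g. "ro",
+ * "rw+nolwh" or "ro+wh".  An attribute which does not fit the base
+ * permission is not an error; it is dropped with a warning.
+ */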
+
+void au_optstr_br_perm(au_br_perm_str_t *str, int perm)
+{
+ au_br_perm_str_t attr;
+ const char *p;
+ char *q;
+ int sz;
+
+ q = str->a;
+ p = au_optstr(&perm, brperm);
+ AuDebugOn(!p || !*p);
+ sz = strlen(p);
+ memcpy(q, p, sz + 1);
+ q += sz;
+
+ sz = au_do_optstr_br_attr(&attr, perm);
+ if (sz) {
+ *q++ = '+';
+ memcpy(q, attr.a, sz + 1);
+ }
+
+ AuDebugOn(strlen(str->a) >= sizeof(str->a));
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t udbalevel = {
+ {AuOpt_UDBA_REVAL, "reval"},
+ {AuOpt_UDBA_NONE, "none"},
+#ifdef CONFIG_AUFS_HNOTIFY
+ {AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */
+#ifdef CONFIG_AUFS_HFSNOTIFY
+ {AuOpt_UDBA_HNOTIFY, "fsnotify"},
+#endif
+#endif
+ {-1, NULL}
+};
+
+static int noinline_for_stack udba_val(char *str)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ return match_token(str, udbalevel, args);
+}
+
+const char *au_optstr_udba(int udba)
+{
+ return au_parser_pattern(udba, udbalevel);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t au_wbr_create_policy = {
+ {AuWbrCreate_TDP, "tdp"},
+ {AuWbrCreate_TDP, "top-down-parent"},
+ {AuWbrCreate_RR, "rr"},
+ {AuWbrCreate_RR, "round-robin"},
+ {AuWbrCreate_MFS, "mfs"},
+ {AuWbrCreate_MFS, "most-free-space"},
+ {AuWbrCreate_MFSV, "mfs:%d"},
+ {AuWbrCreate_MFSV, "most-free-space:%d"},
+
+	/* top-down regardless of the parent, and then mfs */
+ {AuWbrCreate_TDMFS, "tdmfs:%d"},
+ {AuWbrCreate_TDMFSV, "tdmfs:%d:%d"},
+
+ {AuWbrCreate_MFSRR, "mfsrr:%d"},
+ {AuWbrCreate_MFSRRV, "mfsrr:%d:%d"},
+ {AuWbrCreate_PMFS, "pmfs"},
+ {AuWbrCreate_PMFSV, "pmfs:%d"},
+ {AuWbrCreate_PMFSRR, "pmfsrr:%d"},
+ {AuWbrCreate_PMFSRRV, "pmfsrr:%d:%d"},
+
+ {-1, NULL}
+};
+
+static int au_wbr_mfs_wmark(substring_t *arg, char *str,
+ struct au_opt_wbr_create *create)
+{
+ int err;
+ unsigned long long ull;
+
+ err = 0;
+ if (!match_u64(arg, &ull))
+ create->mfsrr_watermark = ull;
+ else {
+ pr_err("bad integer in %s\n", str);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int au_wbr_mfs_sec(substring_t *arg, char *str,
+ struct au_opt_wbr_create *create)
+{
+ int n, err;
+
+ err = 0;
+ if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC)
+ create->mfs_second = n;
+ else {
+ pr_err("bad integer in %s\n", str);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int noinline_for_stack
+au_wbr_create_val(char *str, struct au_opt_wbr_create *create)
+{
+ int err, e;
+ substring_t args[MAX_OPT_ARGS];
+
+ err = match_token(str, au_wbr_create_policy, args);
+ create->wbr_create = err;
+ switch (err) {
+ case AuWbrCreate_MFSRRV:
+ case AuWbrCreate_TDMFSV:
+ case AuWbrCreate_PMFSRRV:
+ e = au_wbr_mfs_wmark(&args[0], str, create);
+ if (!e)
+ e = au_wbr_mfs_sec(&args[1], str, create);
+ if (unlikely(e))
+ err = e;
+ break;
+ case AuWbrCreate_MFSRR:
+ case AuWbrCreate_TDMFS:
+ case AuWbrCreate_PMFSRR:
+ e = au_wbr_mfs_wmark(&args[0], str, create);
+ if (unlikely(e)) {
+ err = e;
+ break;
+ }
+ /*FALLTHROUGH*/
+ case AuWbrCreate_MFS:
+ case AuWbrCreate_PMFS:
+ create->mfs_second = AUFS_MFS_DEF_SEC;
+ break;
+ case AuWbrCreate_MFSV:
+ case AuWbrCreate_PMFSV:
+ e = au_wbr_mfs_sec(&args[0], str, create);
+ if (unlikely(e))
+ err = e;
+ break;
+ }
+
+ return err;
+}
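+
+/*
+ * Illustrative create-policy strings (numbers are made-up examples):
+ * "tdp", "mfs:30" (re-evaluate the free space every 30 seconds) and
+ * "mfsrr:1000000:60" (fall back to round-robin below the watermark).
+ * These arrive here as the value of the "create=%s" mount option.
+ */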
+
+const char *au_optstr_wbr_create(int wbr_create)
+{
+ return au_parser_pattern(wbr_create, au_wbr_create_policy);
+}
+
+static match_table_t au_wbr_copyup_policy = {
+ {AuWbrCopyup_TDP, "tdp"},
+ {AuWbrCopyup_TDP, "top-down-parent"},
+ {AuWbrCopyup_BUP, "bup"},
+ {AuWbrCopyup_BUP, "bottom-up-parent"},
+ {AuWbrCopyup_BU, "bu"},
+ {AuWbrCopyup_BU, "bottom-up"},
+ {-1, NULL}
+};
+
+static int noinline_for_stack au_wbr_copyup_val(char *str)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ return match_token(str, au_wbr_copyup_policy, args);
+}
+
+const char *au_optstr_wbr_copyup(int wbr_copyup)
+{
+ return au_parser_pattern(wbr_copyup, au_wbr_copyup_policy);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+
+static void dump_opts(struct au_opts *opts)
+{
+#ifdef CONFIG_AUFS_DEBUG
+ /* reduce stack space */
+ union {
+ struct au_opt_add *add;
+ struct au_opt_del *del;
+ struct au_opt_mod *mod;
+ struct au_opt_xino *xino;
+ struct au_opt_xino_itrunc *xino_itrunc;
+ struct au_opt_wbr_create *create;
+ } u;
+ struct au_opt *opt;
+
+ opt = opts->opt;
+ while (opt->type != Opt_tail) {
+ switch (opt->type) {
+ case Opt_add:
+ u.add = &opt->add;
+ AuDbg("add {b%d, %s, 0x%x, %p}\n",
+ u.add->bindex, u.add->pathname, u.add->perm,
+ u.add->path.dentry);
+ break;
+ case Opt_del:
+ case Opt_idel:
+ u.del = &opt->del;
+ AuDbg("del {%s, %p}\n",
+ u.del->pathname, u.del->h_path.dentry);
+ break;
+ case Opt_mod:
+ case Opt_imod:
+ u.mod = &opt->mod;
+ AuDbg("mod {%s, 0x%x, %p}\n",
+ u.mod->path, u.mod->perm, u.mod->h_root);
+ break;
+ case Opt_append:
+ u.add = &opt->add;
+ AuDbg("append {b%d, %s, 0x%x, %p}\n",
+ u.add->bindex, u.add->pathname, u.add->perm,
+ u.add->path.dentry);
+ break;
+ case Opt_prepend:
+ u.add = &opt->add;
+ AuDbg("prepend {b%d, %s, 0x%x, %p}\n",
+ u.add->bindex, u.add->pathname, u.add->perm,
+ u.add->path.dentry);
+ break;
+ case Opt_dirwh:
+ AuDbg("dirwh %d\n", opt->dirwh);
+ break;
+ case Opt_rdcache:
+ AuDbg("rdcache %d\n", opt->rdcache);
+ break;
+ case Opt_rdblk:
+ AuDbg("rdblk %u\n", opt->rdblk);
+ break;
+ case Opt_rdblk_def:
+ AuDbg("rdblk_def\n");
+ break;
+ case Opt_rdhash:
+ AuDbg("rdhash %u\n", opt->rdhash);
+ break;
+ case Opt_rdhash_def:
+ AuDbg("rdhash_def\n");
+ break;
+ case Opt_xino:
+ u.xino = &opt->xino;
+ AuDbg("xino {%s %pD}\n", u.xino->path, u.xino->file);
+ break;
+ case Opt_trunc_xino:
+ AuLabel(trunc_xino);
+ break;
+ case Opt_notrunc_xino:
+ AuLabel(notrunc_xino);
+ break;
+ case Opt_trunc_xino_path:
+ case Opt_itrunc_xino:
+ u.xino_itrunc = &opt->xino_itrunc;
+ AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex);
+ break;
+ case Opt_noxino:
+ AuLabel(noxino);
+ break;
+ case Opt_trunc_xib:
+ AuLabel(trunc_xib);
+ break;
+ case Opt_notrunc_xib:
+ AuLabel(notrunc_xib);
+ break;
+ case Opt_shwh:
+ AuLabel(shwh);
+ break;
+ case Opt_noshwh:
+ AuLabel(noshwh);
+ break;
+ case Opt_dirperm1:
+ AuLabel(dirperm1);
+ break;
+ case Opt_nodirperm1:
+ AuLabel(nodirperm1);
+ break;
+ case Opt_plink:
+ AuLabel(plink);
+ break;
+ case Opt_noplink:
+ AuLabel(noplink);
+ break;
+ case Opt_list_plink:
+ AuLabel(list_plink);
+ break;
+ case Opt_udba:
+ AuDbg("udba %d, %s\n",
+ opt->udba, au_optstr_udba(opt->udba));
+ break;
+ case Opt_dio:
+ AuLabel(dio);
+ break;
+ case Opt_nodio:
+ AuLabel(nodio);
+ break;
+ case Opt_diropq_a:
+ AuLabel(diropq_a);
+ break;
+ case Opt_diropq_w:
+ AuLabel(diropq_w);
+ break;
+ case Opt_warn_perm:
+ AuLabel(warn_perm);
+ break;
+ case Opt_nowarn_perm:
+ AuLabel(nowarn_perm);
+ break;
+ case Opt_verbose:
+ AuLabel(verbose);
+ break;
+ case Opt_noverbose:
+ AuLabel(noverbose);
+ break;
+ case Opt_sum:
+ AuLabel(sum);
+ break;
+ case Opt_nosum:
+ AuLabel(nosum);
+ break;
+ case Opt_wsum:
+ AuLabel(wsum);
+ break;
+ case Opt_wbr_create:
+ u.create = &opt->wbr_create;
+ AuDbg("create %d, %s\n", u.create->wbr_create,
+ au_optstr_wbr_create(u.create->wbr_create));
+ switch (u.create->wbr_create) {
+ case AuWbrCreate_MFSV:
+ case AuWbrCreate_PMFSV:
+ AuDbg("%d sec\n", u.create->mfs_second);
+ break;
+ case AuWbrCreate_MFSRR:
+ case AuWbrCreate_TDMFS:
+ AuDbg("%llu watermark\n",
+ u.create->mfsrr_watermark);
+ break;
+ case AuWbrCreate_MFSRRV:
+ case AuWbrCreate_TDMFSV:
+ case AuWbrCreate_PMFSRRV:
+ AuDbg("%llu watermark, %d sec\n",
+ u.create->mfsrr_watermark,
+ u.create->mfs_second);
+ break;
+ }
+ break;
+ case Opt_wbr_copyup:
+ AuDbg("copyup %d, %s\n", opt->wbr_copyup,
+ au_optstr_wbr_copyup(opt->wbr_copyup));
+ break;
+ case Opt_fhsm_sec:
+ AuDbg("fhsm_sec %u\n", opt->fhsm_second);
+ break;
+ case Opt_dirren:
+ AuLabel(dirren);
+ break;
+ case Opt_nodirren:
+ AuLabel(nodirren);
+ break;
+ case Opt_acl:
+ AuLabel(acl);
+ break;
+ case Opt_noacl:
+ AuLabel(noacl);
+ break;
+ default:
+ BUG();
+ }
+ opt++;
+ }
+#endif
+}
+
+void au_opts_free(struct au_opts *opts)
+{
+ struct au_opt *opt;
+
+ opt = opts->opt;
+ while (opt->type != Opt_tail) {
+ switch (opt->type) {
+ case Opt_add:
+ case Opt_append:
+ case Opt_prepend:
+ path_put(&opt->add.path);
+ break;
+ case Opt_del:
+ case Opt_idel:
+ path_put(&opt->del.h_path);
+ break;
+ case Opt_mod:
+ case Opt_imod:
+ dput(opt->mod.h_root);
+ break;
+ case Opt_xino:
+ fput(opt->xino.file);
+ break;
+ }
+ opt++;
+ }
+}
+
+static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags,
+ aufs_bindex_t bindex)
+{
+ int err;
+ struct au_opt_add *add = &opt->add;
+ char *p;
+
+ add->bindex = bindex;
+ add->perm = AuBrPerm_RO;
+ add->pathname = opt_str;
+ p = strchr(opt_str, '=');
+ if (p) {
+ *p++ = 0;
+ if (*p)
+ add->perm = br_perm_val(p);
+ }
+
+ err = vfsub_kern_path(add->pathname, lkup_dirflags, &add->path);
+ if (!err) {
+ if (!p) {
+ add->perm = AuBrPerm_RO;
+ if (au_test_fs_rr(add->path.dentry->d_sb))
+ add->perm = AuBrPerm_RR;
+ else if (!bindex && !(sb_flags & SB_RDONLY))
+ add->perm = AuBrPerm_RW;
+ }
+ opt->type = Opt_add;
+ goto out;
+ }
+ pr_err("lookup failed %s (%d)\n", add->pathname, err);
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
+static int au_opts_parse_del(struct au_opt_del *del, substring_t args[])
+{
+ int err;
+
+ del->pathname = args[0].from;
+ AuDbg("del path %s\n", del->pathname);
+
+ err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path);
+ if (unlikely(err))
+ pr_err("lookup failed %s (%d)\n", del->pathname, err);
+
+ return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex,
+ struct au_opt_del *del, substring_t args[])
+{
+ int err;
+ struct dentry *root;
+
+ err = -EINVAL;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_FLUSH);
+ if (bindex < 0 || au_sbbot(sb) < bindex) {
+ pr_err("out of bounds, %d\n", bindex);
+ goto out;
+ }
+
+ err = 0;
+ del->h_path.dentry = dget(au_h_dptr(root, bindex));
+ del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex));
+
+out:
+ aufs_read_unlock(root, !AuLock_IR);
+ return err;
+}
+#endif
+
+static int noinline_for_stack
+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[])
+{
+ int err;
+ struct path path;
+ char *p;
+
+ err = -EINVAL;
+ mod->path = args[0].from;
+ p = strchr(mod->path, '=');
+ if (unlikely(!p)) {
+ pr_err("no permission %s\n", args[0].from);
+ goto out;
+ }
+
+ *p++ = 0;
+ err = vfsub_kern_path(mod->path, lkup_dirflags, &path);
+ if (unlikely(err)) {
+ pr_err("lookup failed %s (%d)\n", mod->path, err);
+ goto out;
+ }
+
+ mod->perm = br_perm_val(p);
+ AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p);
+ mod->h_root = dget(path.dentry);
+ path_put(&path);
+
+out:
+ return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex,
+ struct au_opt_mod *mod, substring_t args[])
+{
+ int err;
+ struct dentry *root;
+
+ err = -EINVAL;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_FLUSH);
+ if (bindex < 0 || au_sbbot(sb) < bindex) {
+ pr_err("out of bounds, %d\n", bindex);
+ goto out;
+ }
+
+ err = 0;
+ mod->perm = br_perm_val(args[1].from);
+ AuDbg("mod path %s, perm 0x%x, %s\n",
+ mod->path, mod->perm, args[1].from);
+ mod->h_root = dget(au_h_dptr(root, bindex));
+
+out:
+ aufs_read_unlock(root, !AuLock_IR);
+ return err;
+}
+#endif
+
+static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino,
+ substring_t args[])
+{
+ int err;
+ struct file *file;
+
+ file = au_xino_create(sb, args[0].from, /*silent*/0);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+
+ err = -EINVAL;
+ if (unlikely(file->f_path.dentry->d_sb == sb)) {
+ fput(file);
+ pr_err("%s must be outside\n", args[0].from);
+ goto out;
+ }
+
+ err = 0;
+ xino->file = file;
+ xino->path = args[0].from;
+
+out:
+ return err;
+}
+
+static int noinline_for_stack
+au_opts_parse_xino_itrunc_path(struct super_block *sb,
+ struct au_opt_xino_itrunc *xino_itrunc,
+ substring_t args[])
+{
+ int err;
+ aufs_bindex_t bbot, bindex;
+ struct path path;
+ struct dentry *root;
+
+ err = vfsub_kern_path(args[0].from, lkup_dirflags, &path);
+ if (unlikely(err)) {
+ pr_err("lookup failed %s (%d)\n", args[0].from, err);
+ goto out;
+ }
+
+ xino_itrunc->bindex = -1;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_FLUSH);
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ if (au_h_dptr(root, bindex) == path.dentry) {
+ xino_itrunc->bindex = bindex;
+ break;
+ }
+ }
+ aufs_read_unlock(root, !AuLock_IR);
+ path_put(&path);
+
+ if (unlikely(xino_itrunc->bindex < 0)) {
+ pr_err("no such branch %s\n", args[0].from);
+ err = -EINVAL;
+ }
+
+out:
+ return err;
+}
+
+/* called without aufs lock */
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
+{
+ int err, n, token;
+ aufs_bindex_t bindex;
+ unsigned char skipped;
+ struct dentry *root;
+ struct au_opt *opt, *opt_tail;
+ char *opt_str;
+ /* reduce the stack space */
+ union {
+ struct au_opt_xino_itrunc *xino_itrunc;
+ struct au_opt_wbr_create *create;
+ } u;
+ struct {
+ substring_t args[MAX_OPT_ARGS];
+ } *a;
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ root = sb->s_root;
+ err = 0;
+ bindex = 0;
+ opt = opts->opt;
+ opt_tail = opt + opts->max_opt - 1;
+ opt->type = Opt_tail;
+ while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
+ err = -EINVAL;
+ skipped = 0;
+ token = match_token(opt_str, options, a->args);
+ switch (token) {
+ case Opt_br:
+ err = 0;
+ while (!err && (opt_str = strsep(&a->args[0].from, ":"))
+ && *opt_str) {
+ err = opt_add(opt, opt_str, opts->sb_flags,
+ bindex++);
+ if (unlikely(!err && ++opt > opt_tail)) {
+ err = -E2BIG;
+ break;
+ }
+ opt->type = Opt_tail;
+ skipped = 1;
+ }
+ break;
+ case Opt_add:
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ bindex = n;
+ err = opt_add(opt, a->args[1].from, opts->sb_flags,
+ bindex);
+ if (!err)
+ opt->type = token;
+ break;
+ case Opt_append:
+ err = opt_add(opt, a->args[0].from, opts->sb_flags,
+ /*dummy bindex*/1);
+ if (!err)
+ opt->type = token;
+ break;
+ case Opt_prepend:
+ err = opt_add(opt, a->args[0].from, opts->sb_flags,
+ /*bindex*/0);
+ if (!err)
+ opt->type = token;
+ break;
+ case Opt_del:
+ err = au_opts_parse_del(&opt->del, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#if 0 /* reserved for future use */
+ case Opt_idel:
+ del->pathname = "(indexed)";
+ if (unlikely(match_int(&args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ err = au_opts_parse_idel(sb, n, &opt->del, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#endif
+ case Opt_mod:
+ err = au_opts_parse_mod(&opt->mod, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#ifdef IMOD /* reserved for future use */
+ case Opt_imod:
+ u.mod->path = "(indexed)";
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#endif
+ case Opt_xino:
+ err = au_opts_parse_xino(sb, &opt->xino, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+
+ case Opt_trunc_xino_path:
+ err = au_opts_parse_xino_itrunc_path
+ (sb, &opt->xino_itrunc, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+
+ case Opt_itrunc_xino:
+ u.xino_itrunc = &opt->xino_itrunc;
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ u.xino_itrunc->bindex = n;
+ aufs_read_lock(root, AuLock_FLUSH);
+ if (n < 0 || au_sbbot(sb) < n) {
+ pr_err("out of bounds, %d\n", n);
+ aufs_read_unlock(root, !AuLock_IR);
+ break;
+ }
+ aufs_read_unlock(root, !AuLock_IR);
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_dirwh:
+ if (unlikely(match_int(&a->args[0], &opt->dirwh)))
+ break;
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_rdcache:
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ if (unlikely(n > AUFS_RDCACHE_MAX)) {
+ pr_err("rdcache must be smaller than %d\n",
+ AUFS_RDCACHE_MAX);
+ break;
+ }
+ opt->rdcache = n;
+ err = 0;
+ opt->type = token;
+ break;
+ case Opt_rdblk:
+ if (unlikely(match_int(&a->args[0], &n)
+ || n < 0
+ || n > KMALLOC_MAX_SIZE)) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ if (unlikely(n && n < NAME_MAX)) {
+ pr_err("rdblk must be larger than %d\n",
+ NAME_MAX);
+ break;
+ }
+ opt->rdblk = n;
+ err = 0;
+ opt->type = token;
+ break;
+ case Opt_rdhash:
+ if (unlikely(match_int(&a->args[0], &n)
+ || n < 0
+ || n * sizeof(struct hlist_head)
+ > KMALLOC_MAX_SIZE)) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ opt->rdhash = n;
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_trunc_xino:
+ case Opt_notrunc_xino:
+ case Opt_noxino:
+ case Opt_trunc_xib:
+ case Opt_notrunc_xib:
+ case Opt_shwh:
+ case Opt_noshwh:
+ case Opt_dirperm1:
+ case Opt_nodirperm1:
+ case Opt_plink:
+ case Opt_noplink:
+ case Opt_list_plink:
+ case Opt_dio:
+ case Opt_nodio:
+ case Opt_diropq_a:
+ case Opt_diropq_w:
+ case Opt_warn_perm:
+ case Opt_nowarn_perm:
+ case Opt_verbose:
+ case Opt_noverbose:
+ case Opt_sum:
+ case Opt_nosum:
+ case Opt_wsum:
+ case Opt_rdblk_def:
+ case Opt_rdhash_def:
+ case Opt_dirren:
+ case Opt_nodirren:
+ case Opt_acl:
+ case Opt_noacl:
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_udba:
+ opt->udba = udba_val(a->args[0].from);
+ if (opt->udba >= 0) {
+ err = 0;
+ opt->type = token;
+ } else
+ pr_err("wrong value, %s\n", opt_str);
+ break;
+
+ case Opt_wbr_create:
+ u.create = &opt->wbr_create;
+ u.create->wbr_create
+ = au_wbr_create_val(a->args[0].from, u.create);
+ if (u.create->wbr_create >= 0) {
+ err = 0;
+ opt->type = token;
+ } else
+ pr_err("wrong value, %s\n", opt_str);
+ break;
+ case Opt_wbr_copyup:
+ opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
+ if (opt->wbr_copyup >= 0) {
+ err = 0;
+ opt->type = token;
+ } else
+ pr_err("wrong value, %s\n", opt_str);
+ break;
+
+ case Opt_fhsm_sec:
+ if (unlikely(match_int(&a->args[0], &n)
+ || n < 0)) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ if (sysaufs_brs) {
+ opt->fhsm_second = n;
+ opt->type = token;
+ } else
+ pr_warn("ignored %s\n", opt_str);
+ err = 0;
+ break;
+
+ case Opt_ignore:
+ pr_warn("ignored %s\n", opt_str);
+ /*FALLTHROUGH*/
+ case Opt_ignore_silent:
+ skipped = 1;
+ err = 0;
+ break;
+ case Opt_err:
+ pr_err("unknown option %s\n", opt_str);
+ break;
+ }
+
+ if (!err && !skipped) {
+ if (unlikely(++opt > opt_tail)) {
+ err = -E2BIG;
+ opt--;
+ opt->type = Opt_tail;
+ break;
+ }
+ opt->type = Opt_tail;
+ }
+ }
+
+ au_kfree_rcu(a);
+ dump_opts(opts);
+ if (unlikely(err))
+ au_opts_free(opts);
+
+out:
+ return err;
+}
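+
+/*
+ * Sketch of a parse result (paths are made up): for "br:/rw:/ro,udba=reval"
+ * the loop above produces
+ *	{ Opt_add, bindex 0, "/rw" }, { Opt_add, bindex 1, "/ro" },
+ *	{ Opt_udba, AuOpt_UDBA_REVAL }
+ * followed by a terminating Opt_tail entry; au_opts_mount() and
+ * au_opts_remount() below then apply the entries in order.
+ */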
+
+static int au_opt_wbr_create(struct super_block *sb,
+ struct au_opt_wbr_create *create)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ err = 1; /* handled */
+ sbinfo = au_sbi(sb);
+ if (sbinfo->si_wbr_create_ops->fin) {
+ err = sbinfo->si_wbr_create_ops->fin(sb);
+ if (!err)
+ err = 1;
+ }
+
+ sbinfo->si_wbr_create = create->wbr_create;
+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create;
+ switch (create->wbr_create) {
+ case AuWbrCreate_MFSRRV:
+ case AuWbrCreate_MFSRR:
+ case AuWbrCreate_TDMFS:
+ case AuWbrCreate_TDMFSV:
+ case AuWbrCreate_PMFSRR:
+ case AuWbrCreate_PMFSRRV:
+ sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark;
+ /*FALLTHROUGH*/
+ case AuWbrCreate_MFS:
+ case AuWbrCreate_MFSV:
+ case AuWbrCreate_PMFS:
+ case AuWbrCreate_PMFSV:
+ sbinfo->si_wbr_mfs.mfs_expire
+ = msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC);
+ break;
+ }
+
+ if (sbinfo->si_wbr_create_ops->init)
+ sbinfo->si_wbr_create_ops->init(sb); /* ignore */
+
+ return err;
+}
+
+/*
+ * returns,
+ * plus: processed without an error
+ * zero: unprocessed
+ */
+static int au_opt_simple(struct super_block *sb, struct au_opt *opt,
+ struct au_opts *opts)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ err = 1; /* handled */
+ sbinfo = au_sbi(sb);
+ switch (opt->type) {
+ case Opt_udba:
+ sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+ sbinfo->si_mntflags |= opt->udba;
+ opts->given_udba |= opt->udba;
+ break;
+
+ case Opt_plink:
+ au_opt_set(sbinfo->si_mntflags, PLINK);
+ break;
+ case Opt_noplink:
+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
+ au_plink_put(sb, /*verbose*/1);
+ au_opt_clr(sbinfo->si_mntflags, PLINK);
+ break;
+ case Opt_list_plink:
+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
+ au_plink_list(sb);
+ break;
+
+ case Opt_dio:
+ au_opt_set(sbinfo->si_mntflags, DIO);
+ au_fset_opts(opts->flags, REFRESH_DYAOP);
+ break;
+ case Opt_nodio:
+ au_opt_clr(sbinfo->si_mntflags, DIO);
+ au_fset_opts(opts->flags, REFRESH_DYAOP);
+ break;
+
+ case Opt_fhsm_sec:
+ au_fhsm_set(sbinfo, opt->fhsm_second);
+ break;
+
+ case Opt_diropq_a:
+ au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+ break;
+ case Opt_diropq_w:
+ au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+ break;
+
+ case Opt_warn_perm:
+ au_opt_set(sbinfo->si_mntflags, WARN_PERM);
+ break;
+ case Opt_nowarn_perm:
+ au_opt_clr(sbinfo->si_mntflags, WARN_PERM);
+ break;
+
+ case Opt_verbose:
+ au_opt_set(sbinfo->si_mntflags, VERBOSE);
+ break;
+ case Opt_noverbose:
+ au_opt_clr(sbinfo->si_mntflags, VERBOSE);
+ break;
+
+ case Opt_sum:
+ au_opt_set(sbinfo->si_mntflags, SUM);
+ break;
+	case Opt_wsum:
+		au_opt_clr(sbinfo->si_mntflags, SUM);
+		au_opt_set(sbinfo->si_mntflags, SUM_W);
+		break;
+ case Opt_nosum:
+ au_opt_clr(sbinfo->si_mntflags, SUM);
+ au_opt_clr(sbinfo->si_mntflags, SUM_W);
+ break;
+
+ case Opt_wbr_create:
+ err = au_opt_wbr_create(sb, &opt->wbr_create);
+ break;
+ case Opt_wbr_copyup:
+ sbinfo->si_wbr_copyup = opt->wbr_copyup;
+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup;
+ break;
+
+ case Opt_dirwh:
+ sbinfo->si_dirwh = opt->dirwh;
+ break;
+
+ case Opt_rdcache:
+ sbinfo->si_rdcache
+ = msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC);
+ break;
+ case Opt_rdblk:
+ sbinfo->si_rdblk = opt->rdblk;
+ break;
+ case Opt_rdblk_def:
+ sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+ break;
+ case Opt_rdhash:
+ sbinfo->si_rdhash = opt->rdhash;
+ break;
+ case Opt_rdhash_def:
+ sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+ break;
+
+ case Opt_shwh:
+ au_opt_set(sbinfo->si_mntflags, SHWH);
+ break;
+ case Opt_noshwh:
+ au_opt_clr(sbinfo->si_mntflags, SHWH);
+ break;
+
+ case Opt_dirperm1:
+ au_opt_set(sbinfo->si_mntflags, DIRPERM1);
+ break;
+ case Opt_nodirperm1:
+ au_opt_clr(sbinfo->si_mntflags, DIRPERM1);
+ break;
+
+ case Opt_trunc_xino:
+ au_opt_set(sbinfo->si_mntflags, TRUNC_XINO);
+ break;
+ case Opt_notrunc_xino:
+ au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO);
+ break;
+
+ case Opt_trunc_xino_path:
+ case Opt_itrunc_xino:
+ err = au_xino_trunc(sb, opt->xino_itrunc.bindex,
+ /*idx_begin*/0);
+ if (!err)
+ err = 1;
+ break;
+
+ case Opt_trunc_xib:
+ au_fset_opts(opts->flags, TRUNC_XIB);
+ break;
+ case Opt_notrunc_xib:
+ au_fclr_opts(opts->flags, TRUNC_XIB);
+ break;
+
+ case Opt_dirren:
+ err = 1;
+ if (!au_opt_test(sbinfo->si_mntflags, DIRREN)) {
+ err = au_dr_opt_set(sb);
+ if (!err)
+ err = 1;
+ }
+ if (err == 1)
+ au_opt_set(sbinfo->si_mntflags, DIRREN);
+ break;
+ case Opt_nodirren:
+ err = 1;
+ if (au_opt_test(sbinfo->si_mntflags, DIRREN)) {
+ err = au_dr_opt_clr(sb, au_ftest_opts(opts->flags,
+ DR_FLUSHED));
+ if (!err)
+ err = 1;
+ }
+ if (err == 1)
+ au_opt_clr(sbinfo->si_mntflags, DIRREN);
+ break;
+
+ case Opt_acl:
+ sb->s_flags |= SB_POSIXACL;
+ break;
+ case Opt_noacl:
+ sb->s_flags &= ~SB_POSIXACL;
+ break;
+
+ default:
+ err = 0;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * returns tri-state.
+ * plus: processed without an error
+ * zero: unprocessed
+ * minus: error
+ */
+static int au_opt_br(struct super_block *sb, struct au_opt *opt,
+ struct au_opts *opts)
+{
+ int err, do_refresh;
+
+ err = 0;
+ switch (opt->type) {
+ case Opt_append:
+ opt->add.bindex = au_sbbot(sb) + 1;
+ if (opt->add.bindex < 0)
+ opt->add.bindex = 0;
+ goto add;
+ case Opt_prepend:
+ opt->add.bindex = 0;
+ add: /* indented label */
+ case Opt_add:
+ err = au_br_add(sb, &opt->add,
+ au_ftest_opts(opts->flags, REMOUNT));
+ if (!err) {
+ err = 1;
+ au_fset_opts(opts->flags, REFRESH);
+ }
+ break;
+
+ case Opt_del:
+ case Opt_idel:
+ err = au_br_del(sb, &opt->del,
+ au_ftest_opts(opts->flags, REMOUNT));
+ if (!err) {
+ err = 1;
+ au_fset_opts(opts->flags, TRUNC_XIB);
+ au_fset_opts(opts->flags, REFRESH);
+ }
+ break;
+
+ case Opt_mod:
+ case Opt_imod:
+ err = au_br_mod(sb, &opt->mod,
+ au_ftest_opts(opts->flags, REMOUNT),
+ &do_refresh);
+ if (!err) {
+ err = 1;
+ if (do_refresh)
+ au_fset_opts(opts->flags, REFRESH);
+ }
+ break;
+ }
+ return err;
+}
+
+static int au_opt_xino(struct super_block *sb, struct au_opt *opt,
+ struct au_opt_xino **opt_xino,
+ struct au_opts *opts)
+{
+ int err;
+
+ err = 0;
+ switch (opt->type) {
+ case Opt_xino:
+ err = au_xino_set(sb, &opt->xino,
+ !!au_ftest_opts(opts->flags, REMOUNT));
+ if (unlikely(err))
+ break;
+
+ *opt_xino = &opt->xino;
+ break;
+
+ case Opt_noxino:
+ au_xino_clr(sb);
+ *opt_xino = (void *)-1;
+ break;
+ }
+
+ return err;
+}
+
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+ unsigned int pending)
+{
+ int err, fhsm;
+ aufs_bindex_t bindex, bbot;
+ unsigned char do_plink, skip, do_free, can_no_dreval;
+ struct au_branch *br;
+ struct au_wbr *wbr;
+ struct dentry *root, *dentry;
+ struct inode *dir, *h_dir;
+ struct au_sbinfo *sbinfo;
+ struct au_hinode *hdir;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));
+
+ if (!(sb_flags & SB_RDONLY)) {
+ if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
+ pr_warn("first branch should be rw\n");
+ if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
+ pr_warn_once("shwh should be used with ro\n");
+ }
+
+ if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY)
+ && !au_opt_test(sbinfo->si_mntflags, XINO))
+ pr_warn_once("udba=*notify requires xino\n");
+
+ if (au_opt_test(sbinfo->si_mntflags, DIRPERM1))
+ pr_warn_once("dirperm1 breaks the protection"
+ " by the permission bits on the lower branch\n");
+
+ err = 0;
+ fhsm = 0;
+ root = sb->s_root;
+ dir = d_inode(root);
+ do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
+ can_no_dreval = !!au_opt_test((sbinfo->si_mntflags | pending),
+ UDBA_NONE);
+ bbot = au_sbbot(sb);
+ for (bindex = 0; !err && bindex <= bbot; bindex++) {
+ skip = 0;
+ h_dir = au_h_iptr(dir, bindex);
+ br = au_sbr(sb, bindex);
+
+ if ((br->br_perm & AuBrAttr_ICEX)
+ && !h_dir->i_op->listxattr)
+ br->br_perm &= ~AuBrAttr_ICEX;
+#if 0
+ if ((br->br_perm & AuBrAttr_ICEX_SEC)
+ && (au_br_sb(br)->s_flags & SB_NOSEC))
+ br->br_perm &= ~AuBrAttr_ICEX_SEC;
+#endif
+
+ do_free = 0;
+ wbr = br->br_wbr;
+ if (wbr)
+ wbr_wh_read_lock(wbr);
+
+ if (!au_br_writable(br->br_perm)) {
+ do_free = !!wbr;
+ skip = (!wbr
+ || (!wbr->wbr_whbase
+ && !wbr->wbr_plink
+ && !wbr->wbr_orph));
+ } else if (!au_br_wh_linkable(br->br_perm)) {
+ /* skip = (!br->br_whbase && !br->br_orph); */
+ skip = (!wbr || !wbr->wbr_whbase);
+ if (skip && wbr) {
+ if (do_plink)
+ skip = !!wbr->wbr_plink;
+ else
+ skip = !wbr->wbr_plink;
+ }
+ } else {
+			/* skip = (br->br_whbase && br->br_orph); */
+ skip = (wbr && wbr->wbr_whbase);
+ if (skip) {
+ if (do_plink)
+ skip = !!wbr->wbr_plink;
+ else
+ skip = !wbr->wbr_plink;
+ }
+ }
+ if (wbr)
+ wbr_wh_read_unlock(wbr);
+
+ if (can_no_dreval) {
+ dentry = br->br_path.dentry;
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags &
+ (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE))
+ can_no_dreval = 0;
+ spin_unlock(&dentry->d_lock);
+ }
+
+ if (au_br_fhsm(br->br_perm)) {
+ fhsm++;
+ AuDebugOn(!br->br_fhsm);
+ }
+
+ if (skip)
+ continue;
+
+ hdir = au_hi(dir, bindex);
+ au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+ if (wbr)
+ wbr_wh_write_lock(wbr);
+ err = au_wh_init(br, sb);
+ if (wbr)
+ wbr_wh_write_unlock(wbr);
+ au_hn_inode_unlock(hdir);
+
+ if (!err && do_free) {
+ au_kfree_rcu(wbr);
+ br->br_wbr = NULL;
+ }
+ }
+
+ if (can_no_dreval)
+ au_fset_si(sbinfo, NO_DREVAL);
+ else
+ au_fclr_si(sbinfo, NO_DREVAL);
+
+ if (fhsm >= 2) {
+ au_fset_si(sbinfo, FHSM);
+ for (bindex = bbot; bindex >= 0; bindex--) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm)) {
+ au_fhsm_set_bottom(sb, bindex);
+ break;
+ }
+ }
+ } else {
+ au_fclr_si(sbinfo, FHSM);
+ au_fhsm_set_bottom(sb, -1);
+ }
+
+ return err;
+}
+
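+/*
+ * Processing order in au_opts_mount() below: apply the simple options
+ * first, then add/del/mod the branches with xino disabled and udba forced
+ * to "reval", handle the xino options, verify the result via
+ * au_opts_verify(), set up the default xino file when none was given
+ * explicitly, and finally restore the requested udba mode (resetting
+ * hnotify on each branch).
+ */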
+int au_opts_mount(struct super_block *sb, struct au_opts *opts)
+{
+ int err;
+ unsigned int tmp;
+ aufs_bindex_t bindex, bbot;
+ struct au_opt *opt;
+ struct au_opt_xino *opt_xino, xino;
+ struct au_sbinfo *sbinfo;
+ struct au_branch *br;
+ struct inode *dir;
+
+ SiMustWriteLock(sb);
+
+ err = 0;
+ opt_xino = NULL;
+ opt = opts->opt;
+ while (err >= 0 && opt->type != Opt_tail)
+ err = au_opt_simple(sb, opt++, opts);
+ if (err > 0)
+ err = 0;
+ else if (unlikely(err < 0))
+ goto out;
+
+	/* disable xino and udba temporarily */
+ sbinfo = au_sbi(sb);
+ tmp = sbinfo->si_mntflags;
+ au_opt_clr(sbinfo->si_mntflags, XINO);
+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
+
+ opt = opts->opt;
+ while (err >= 0 && opt->type != Opt_tail)
+ err = au_opt_br(sb, opt++, opts);
+ if (err > 0)
+ err = 0;
+ else if (unlikely(err < 0))
+ goto out;
+
+ bbot = au_sbbot(sb);
+ if (unlikely(bbot < 0)) {
+ err = -EINVAL;
+ pr_err("no branches\n");
+ goto out;
+ }
+
+ if (au_opt_test(tmp, XINO))
+ au_opt_set(sbinfo->si_mntflags, XINO);
+ opt = opts->opt;
+ while (!err && opt->type != Opt_tail)
+ err = au_opt_xino(sb, opt++, &opt_xino, opts);
+ if (unlikely(err))
+ goto out;
+
+ err = au_opts_verify(sb, sb->s_flags, tmp);
+ if (unlikely(err))
+ goto out;
+
+ /* restore xino */
+ if (au_opt_test(tmp, XINO) && !opt_xino) {
+ xino.file = au_xino_def(sb);
+ err = PTR_ERR(xino.file);
+ if (IS_ERR(xino.file))
+ goto out;
+
+ err = au_xino_set(sb, &xino, /*remount*/0);
+ fput(xino.file);
+ if (unlikely(err))
+ goto out;
+ }
+
+ /* restore udba */
+ tmp &= AuOptMask_UDBA;
+ sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+ sbinfo->si_mntflags |= tmp;
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ err = au_hnotify_reset_br(tmp, br, br->br_perm);
+ if (unlikely(err))
+ AuIOErr("hnotify failed on br %d, %d, ignored\n",
+ bindex, err);
+ /* go on even if err */
+ }
+ if (au_opt_test(tmp, UDBA_HNOTIFY)) {
+ dir = d_inode(sb->s_root);
+ au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO);
+ }
+
+out:
+ return err;
+}
+
+int au_opts_remount(struct super_block *sb, struct au_opts *opts)
+{
+ int err, rerr;
+ unsigned char no_dreval;
+ struct inode *dir;
+ struct au_opt_xino *opt_xino;
+ struct au_opt *opt;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ err = au_dr_opt_flush(sb);
+ if (unlikely(err))
+ goto out;
+ au_fset_opts(opts->flags, DR_FLUSHED);
+
+ dir = d_inode(sb->s_root);
+ sbinfo = au_sbi(sb);
+ opt_xino = NULL;
+ opt = opts->opt;
+ while (err >= 0 && opt->type != Opt_tail) {
+ err = au_opt_simple(sb, opt, opts);
+ if (!err)
+ err = au_opt_br(sb, opt, opts);
+ if (!err)
+ err = au_opt_xino(sb, opt, &opt_xino, opts);
+ opt++;
+ }
+ if (err > 0)
+ err = 0;
+ AuTraceErr(err);
+	/* go on even if err */
+
+ no_dreval = !!au_ftest_si(sbinfo, NO_DREVAL);
+ rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0);
+ if (unlikely(rerr && !err))
+ err = rerr;
+
+ if (no_dreval != !!au_ftest_si(sbinfo, NO_DREVAL))
+ au_fset_opts(opts->flags, REFRESH_IDOP);
+
+ if (au_ftest_opts(opts->flags, TRUNC_XIB)) {
+ rerr = au_xib_trunc(sb);
+ if (unlikely(rerr && !err))
+ err = rerr;
+ }
+
+ /* will be handled by the caller */
+ if (!au_ftest_opts(opts->flags, REFRESH)
+ && (opts->given_udba
+ || au_opt_test(sbinfo->si_mntflags, XINO)
+ || au_ftest_opts(opts->flags, REFRESH_IDOP)
+ ))
+ au_fset_opts(opts->flags, REFRESH);
+
+ AuDbg("status 0x%x\n", opts->flags);
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_opt_udba(struct super_block *sb)
+{
+ return au_mntflags(sb) & AuOptMask_UDBA;
+}
diff --git a/fs/aufs/opts.h b/fs/aufs/opts.h
new file mode 100644
index 000000000000..43954ebd6192
--- /dev/null
+++ b/fs/aufs/opts.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * mount options/flags
+ */
+
+#ifndef __AUFS_OPTS_H__
+#define __AUFS_OPTS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct file;
+
+/* ---------------------------------------------------------------------- */
+
+/* mount flags */
+#define AuOpt_XINO 1 /* external inode number bitmap
+ and translation table */
+#define AuOpt_TRUNC_XINO (1 << 1) /* truncate xino files */
+#define AuOpt_UDBA_NONE (1 << 2) /* users direct branch access */
+#define AuOpt_UDBA_REVAL (1 << 3)
+#define AuOpt_UDBA_HNOTIFY (1 << 4)
+#define AuOpt_SHWH (1 << 5) /* show whiteout */
+#define AuOpt_PLINK (1 << 6) /* pseudo-link */
+#define AuOpt_DIRPERM1 (1 << 7) /* ignore the lower dir's perm
+ bits */
+#define AuOpt_ALWAYS_DIROPQ (1 << 9) /* policy to creating diropq */
+#define AuOpt_SUM (1 << 10) /* summation for statfs(2) */
+#define AuOpt_SUM_W (1 << 11) /* unimplemented */
+#define AuOpt_WARN_PERM (1 << 12) /* warn when add-branch */
+#define AuOpt_VERBOSE (1 << 13) /* busy inode when del-branch */
+#define AuOpt_DIO (1 << 14) /* direct io */
+#define AuOpt_DIRREN (1 << 15) /* directory rename */
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuOpt_UDBA_HNOTIFY
+#define AuOpt_UDBA_HNOTIFY 0
+#endif
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuOpt_DIRREN
+#define AuOpt_DIRREN 0
+#endif
+#ifndef CONFIG_AUFS_SHWH
+#undef AuOpt_SHWH
+#define AuOpt_SHWH 0
+#endif
+
+#define AuOpt_Def (AuOpt_XINO \
+ | AuOpt_UDBA_REVAL \
+ | AuOpt_PLINK \
+ /* | AuOpt_DIRPERM1 */ \
+ | AuOpt_WARN_PERM)
+#define AuOptMask_UDBA (AuOpt_UDBA_NONE \
+ | AuOpt_UDBA_REVAL \
+ | AuOpt_UDBA_HNOTIFY)
+
+#define au_opt_test(flags, name) (flags & AuOpt_##name)
+#define au_opt_set(flags, name) do { \
+ BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \
+ ((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_set_udba(flags, name) do { \
+ (flags) &= ~AuOptMask_UDBA; \
+ ((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_clr(flags, name) do { \
+ ((flags) &= ~AuOpt_##name); \
+} while (0)
+
+static inline unsigned int au_opts_plink(unsigned int mntflags)
+{
+#ifdef CONFIG_PROC_FS
+ return mntflags;
+#else
+ return mntflags & ~AuOpt_PLINK;
+#endif
+}
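+
+/*
+ * Usage sketch (illustrative; "sbinfo" stands for the aufs per-superblock
+ * info object defined elsewhere):
+ *	if (au_opt_test(sbinfo->si_mntflags, PLINK))
+ *		au_opt_clr(sbinfo->si_mntflags, PLINK);
+ *	au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
+ * au_opt_set() rejects the UDBA_* values at build time since they are
+ * mutually exclusive; use au_opt_set_udba() for those.
+ */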
+
+/* ---------------------------------------------------------------------- */
+
+/* policies to select one among multiple writable branches */
+enum {
+ AuWbrCreate_TDP, /* top down parent */
+ AuWbrCreate_RR, /* round robin */
+ AuWbrCreate_MFS, /* most free space */
+ AuWbrCreate_MFSV, /* mfs with seconds */
+ AuWbrCreate_MFSRR, /* mfs then rr */
+ AuWbrCreate_MFSRRV, /* mfs then rr with seconds */
+	AuWbrCreate_TDMFS,	/* top down regardless of the parent, and mfs */
+	AuWbrCreate_TDMFSV,	/* tdmfs with seconds */
+ AuWbrCreate_PMFS, /* parent and mfs */
+ AuWbrCreate_PMFSV, /* parent and mfs with seconds */
+ AuWbrCreate_PMFSRR, /* parent, mfs and round-robin */
+ AuWbrCreate_PMFSRRV, /* plus seconds */
+
+ AuWbrCreate_Def = AuWbrCreate_TDP
+};
+
+enum {
+ AuWbrCopyup_TDP, /* top down parent */
+ AuWbrCopyup_BUP, /* bottom up parent */
+ AuWbrCopyup_BU, /* bottom up */
+
+ AuWbrCopyup_Def = AuWbrCopyup_TDP
+};
+
+/* ---------------------------------------------------------------------- */
+
+struct au_opt_add {
+ aufs_bindex_t bindex;
+ char *pathname;
+ int perm;
+ struct path path;
+};
+
+struct au_opt_del {
+ char *pathname;
+ struct path h_path;
+};
+
+struct au_opt_mod {
+ char *path;
+ int perm;
+ struct dentry *h_root;
+};
+
+struct au_opt_xino {
+ char *path;
+ struct file *file;
+};
+
+struct au_opt_xino_itrunc {
+ aufs_bindex_t bindex;
+};
+
+struct au_opt_wbr_create {
+ int wbr_create;
+ int mfs_second;
+ unsigned long long mfsrr_watermark;
+};
+
+struct au_opt {
+ int type;
+ union {
+ struct au_opt_xino xino;
+ struct au_opt_xino_itrunc xino_itrunc;
+ struct au_opt_add add;
+ struct au_opt_del del;
+ struct au_opt_mod mod;
+ int dirwh;
+ int rdcache;
+ unsigned int rdblk;
+ unsigned int rdhash;
+ int udba;
+ struct au_opt_wbr_create wbr_create;
+ int wbr_copyup;
+ unsigned int fhsm_second;
+ };
+};
+
+/* opts flags */
+#define AuOpts_REMOUNT 1
+#define AuOpts_REFRESH (1 << 1)
+#define AuOpts_TRUNC_XIB (1 << 2)
+#define AuOpts_REFRESH_DYAOP (1 << 3)
+#define AuOpts_REFRESH_IDOP (1 << 4)
+#define AuOpts_DR_FLUSHED (1 << 5)
+#define au_ftest_opts(flags, name) ((flags) & AuOpts_##name)
+#define au_fset_opts(flags, name) \
+ do { (flags) |= AuOpts_##name; } while (0)
+#define au_fclr_opts(flags, name) \
+ do { (flags) &= ~AuOpts_##name; } while (0)
+
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuOpts_DR_FLUSHED
+#define AuOpts_DR_FLUSHED 0
+#endif
+
+struct au_opts {
+ struct au_opt *opt;
+ int max_opt;
+
+ unsigned int given_udba;
+ unsigned int flags;
+ unsigned long sb_flags;
+};
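+
+/*
+ * The mount/remount path provides "opt" as an array of max_opt entries;
+ * au_opts_parse() terminates the used portion with an Opt_tail entry and
+ * returns -E2BIG when the array is too small.
+ */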
+
+/* ---------------------------------------------------------------------- */
+
+/* opts.c */
+void au_optstr_br_perm(au_br_perm_str_t *str, int perm);
+const char *au_optstr_udba(int udba);
+const char *au_optstr_wbr_copyup(int wbr_copyup);
+const char *au_optstr_wbr_create(int wbr_create);
+
+void au_opts_free(struct au_opts *opts);
+struct super_block;
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts);
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+ unsigned int pending);
+int au_opts_mount(struct super_block *sb, struct au_opts *opts);
+int au_opts_remount(struct super_block *sb, struct au_opts *opts);
+
+unsigned int au_opt_udba(struct super_block *sb);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_OPTS_H__ */
diff --git a/fs/aufs/plink.c b/fs/aufs/plink.c
new file mode 100644
index 000000000000..718edb8827a3
--- /dev/null
+++ b/fs/aufs/plink.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * pseudo-link
+ */
+
+#include "aufs.h"
+
+/*
+ * the pseudo-link maintenance mode.
+ * while a user process maintains the pseudo-links,
+ * adding a new plink and manipulating branches are prohibited.
+ *
+ * Flags
+ * NOPLM:
+ * For entry functions which will handle plink, and i_mutex is already held
+ * in VFS.
+ * They cannot wait and should return an error at once.
+ * Callers have to check the error.
+ * NOPLMW:
+ * For entry functions which will handle plink, but i_mutex is not held
+ * in VFS.
+ * They can wait for the plink maintenance mode to finish.
+ *
+ * They behave like F_SETLK and F_SETLKW.
+ * If the caller never handles plink, then both flags are unnecessary.
+ */
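+
+/*
+ * Caller sketch: entry paths which may touch plinks pass AuLock_NOPLM or
+ * AuLock_NOPLMW down to the aufs lock helpers; for instance au_mvdown()
+ * in mvdown.c uses AuLock_NOPLMW and therefore simply waits while a
+ * userspace helper (typically /sbin/mount.aufs, see the todo below)
+ * maintains the pseudo-links.
+ */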
+
+int au_plink_maint(struct super_block *sb, int flags)
+{
+ int err;
+ pid_t pid, ppid;
+ struct task_struct *parent, *prev;
+ struct au_sbinfo *sbi;
+
+ SiMustAnyLock(sb);
+
+ err = 0;
+ if (!au_opt_test(au_mntflags(sb), PLINK))
+ goto out;
+
+ sbi = au_sbi(sb);
+ pid = sbi->si_plink_maint_pid;
+ if (!pid || pid == current->pid)
+ goto out;
+
+ /* todo: it highly depends upon /sbin/mount.aufs */
+ prev = NULL;
+ parent = current;
+ ppid = 0;
+ rcu_read_lock();
+ while (1) {
+ parent = rcu_dereference(parent->real_parent);
+ if (parent == prev)
+ break;
+ ppid = task_pid_vnr(parent);
+ if (pid == ppid) {
+ rcu_read_unlock();
+ goto out;
+ }
+ prev = parent;
+ }
+ rcu_read_unlock();
+
+ if (au_ftest_lock(flags, NOPLMW)) {
+		/* no i_mutex is held in VFS here, so it is safe to wait */
+ /* AuDebugOn(!lockdep_depth(current)); */
+ while (sbi->si_plink_maint_pid) {
+ si_read_unlock(sb);
+ /* gave up wake_up_bit() */
+ wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid);
+
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&sbi->si_nowait);
+ si_noflush_read_lock(sb);
+ }
+ } else if (au_ftest_lock(flags, NOPLM)) {
+ AuDbg("ppid %d, pid %d\n", ppid, pid);
+ err = -EAGAIN;
+ }
+
+out:
+ return err;
+}
+
+void au_plink_maint_leave(struct au_sbinfo *sbinfo)
+{
+ spin_lock(&sbinfo->si_plink_maint_lock);
+ sbinfo->si_plink_maint_pid = 0;
+ spin_unlock(&sbinfo->si_plink_maint_lock);
+ wake_up_all(&sbinfo->si_plink_wq);
+}
+
+int au_plink_maint_enter(struct super_block *sb)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+	/* make sure I am the only one in this fs */
+ si_write_lock(sb, AuLock_FLUSH);
+ if (au_opt_test(au_mntflags(sb), PLINK)) {
+ spin_lock(&sbinfo->si_plink_maint_lock);
+ if (!sbinfo->si_plink_maint_pid)
+ sbinfo->si_plink_maint_pid = current->pid;
+ else
+ err = -EBUSY;
+ spin_unlock(&sbinfo->si_plink_maint_lock);
+ }
+ si_write_unlock(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb)
+{
+ int i;
+ struct au_sbinfo *sbinfo;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ struct au_icntnr *icntnr;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ for (i = 0; i < AuPlink_NHASH; i++) {
+ hbl = sbinfo->si_plink + i;
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(icntnr, pos, hbl, plink)
+ AuDbg("%lu\n", icntnr->vfs_inode.i_ino);
+ hlist_bl_unlock(hbl);
+ }
+}
+#endif
+
+/* is the inode pseudo-linked? */
+int au_plink_test(struct inode *inode)
+{
+ int found, i;
+ struct au_sbinfo *sbinfo;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ struct au_icntnr *icntnr;
+
+ sbinfo = au_sbi(inode->i_sb);
+ AuRwMustAnyLock(&sbinfo->si_rwsem);
+ AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK));
+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+ found = 0;
+ i = au_plink_hash(inode->i_ino);
+ hbl = sbinfo->si_plink + i;
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(icntnr, pos, hbl, plink)
+ if (&icntnr->vfs_inode == inode) {
+ found = 1;
+ break;
+ }
+ hlist_bl_unlock(hbl);
+ return found;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generate a name for plink.
+ * the file will be stored under AUFS_WH_PLINKDIR.
+ */
+/* 20 is the max number of decimal digits in a 64bit ulong */
+#define PLINK_NAME_LEN ((20 + 1) * 2)
+
+static int plink_name(char *name, int len, struct inode *inode,
+ aufs_bindex_t bindex)
+{
+ int rlen;
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, bindex);
+ rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino);
+ return rlen;
+}
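+
+/*
+ * Worked example (made-up inode numbers): for an aufs inode 1234 whose
+ * lower inode on the target branch is 5678, plink_name() yields
+ * "1234.5678".  PLINK_NAME_LEN (== 42) leaves room for two 20-digit
+ * decimals plus the separating dot and the trailing NUL.
+ */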
+
+struct au_do_plink_lkup_args {
+ struct dentry **errp;
+ struct qstr *tgtname;
+ struct dentry *h_parent;
+ struct au_branch *br;
+};
+
+static struct dentry *au_do_plink_lkup(struct qstr *tgtname,
+ struct dentry *h_parent,
+ struct au_branch *br)
+{
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ h_inode = d_inode(h_parent);
+ inode_lock_shared_nested(h_inode, AuLsc_I_CHILD2);
+ h_dentry = vfsub_lkup_one(tgtname, h_parent);
+ inode_unlock_shared(h_inode);
+ return h_dentry;
+}
+
+static void au_call_do_plink_lkup(void *args)
+{
+ struct au_do_plink_lkup_args *a = args;
+ *a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br);
+}
+
+/* lookup the plink-ed @inode under the branch at @bindex */
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex)
+{
+ struct dentry *h_dentry, *h_parent;
+ struct au_branch *br;
+ int wkq_err;
+ char a[PLINK_NAME_LEN];
+ struct qstr tgtname = QSTR_INIT(a, 0);
+
+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+ br = au_sbr(inode->i_sb, bindex);
+ h_parent = br->br_wbr->wbr_plink;
+ tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+ struct au_do_plink_lkup_args args = {
+ .errp = &h_dentry,
+ .tgtname = &tgtname,
+ .h_parent = h_parent,
+ .br = br
+ };
+
+ wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args);
+ if (unlikely(wkq_err))
+ h_dentry = ERR_PTR(wkq_err);
+ } else
+ h_dentry = au_do_plink_lkup(&tgtname, h_parent, br);
+
+ return h_dentry;
+}
+
+/* create a pseudo-link */
+static int do_whplink(struct qstr *tgt, struct dentry *h_parent,
+ struct dentry *h_dentry, struct au_branch *br)
+{
+ int err;
+ struct path h_path = {
+ .mnt = au_br_mnt(br)
+ };
+ struct inode *h_dir, *delegated;
+
+ h_dir = d_inode(h_parent);
+ inode_lock_nested(h_dir, AuLsc_I_CHILD2);
+again:
+ h_path.dentry = vfsub_lkup_one(tgt, h_parent);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry))
+ goto out;
+
+ err = 0;
+ /* wh.plink dir is not monitored */
+ /* todo: is it really safe? */
+ if (d_is_positive(h_path.dentry)
+ && d_inode(h_path.dentry) != d_inode(h_dentry)) {
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, &h_path, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ dput(h_path.dentry);
+ h_path.dentry = NULL;
+ if (!err)
+ goto again;
+ }
+ if (!err && d_is_negative(h_path.dentry)) {
+ delegated = NULL;
+ err = vfsub_link(h_dentry, h_dir, &h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ }
+ dput(h_path.dentry);
+
+out:
+ inode_unlock(h_dir);
+ return err;
+}
+
+struct do_whplink_args {
+ int *errp;
+ struct qstr *tgt;
+ struct dentry *h_parent;
+ struct dentry *h_dentry;
+ struct au_branch *br;
+};
+
+static void call_do_whplink(void *args)
+{
+ struct do_whplink_args *a = args;
+ *a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br);
+}
+
+static int whplink(struct dentry *h_dentry, struct inode *inode,
+ aufs_bindex_t bindex, struct au_branch *br)
+{
+ int err, wkq_err;
+ struct au_wbr *wbr;
+ struct dentry *h_parent;
+ char a[PLINK_NAME_LEN];
+ struct qstr tgtname = QSTR_INIT(a, 0);
+
+ wbr = au_sbr(inode->i_sb, bindex)->br_wbr;
+ h_parent = wbr->wbr_plink;
+ tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+ /* always superio. */
+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+ struct do_whplink_args args = {
+ .errp = &err,
+ .tgt = &tgtname,
+ .h_parent = h_parent,
+ .h_dentry = h_dentry,
+ .br = br
+ };
+ wkq_err = au_wkq_wait(call_do_whplink, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ } else
+ err = do_whplink(&tgtname, h_parent, h_dentry, br);
+
+ return err;
+}
+
+/*
+ * create a new pseudo-link for @h_dentry on @bindex.
+ * the linked inode is held in aufs @inode.
+ */
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_dentry)
+{
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ struct au_icntnr *icntnr;
+ int found, err, cnt, i;
+
+ sb = inode->i_sb;
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ found = au_plink_test(inode);
+ if (found)
+ return;
+
+ i = au_plink_hash(inode->i_ino);
+ hbl = sbinfo->si_plink + i;
+ au_igrab(inode);
+
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry(icntnr, pos, hbl, plink) {
+ if (&icntnr->vfs_inode == inode) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ icntnr = container_of(inode, struct au_icntnr, vfs_inode);
+ hlist_bl_add_head(&icntnr->plink, hbl);
+ }
+ hlist_bl_unlock(hbl);
+ if (!found) {
+ cnt = au_hbl_count(hbl);
+#define msg "unexpectedly unbalanced or too many pseudo-links"
+ if (cnt > AUFS_PLINK_WARN)
+ AuWarn1(msg ", %d\n", cnt);
+#undef msg
+ err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex));
+ if (unlikely(err)) {
+ pr_warn("err %d, damaged pseudo link.\n", err);
+ au_hbl_del(&icntnr->plink, hbl);
+ iput(&icntnr->vfs_inode);
+ }
+ } else
+ iput(&icntnr->vfs_inode);
+}
+
+/* free all plinks */
+void au_plink_put(struct super_block *sb, int verbose)
+{
+ int i, warned;
+ struct au_sbinfo *sbinfo;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos, *tmp;
+ struct au_icntnr *icntnr;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ /* no spin_lock since sbinfo is write-locked */
+ warned = 0;
+ for (i = 0; i < AuPlink_NHASH; i++) {
+ hbl = sbinfo->si_plink + i;
+ if (!warned && verbose && !hlist_bl_empty(hbl)) {
+ pr_warn("pseudo-link is not flushed\n");
+ warned = 1;
+ }
+ hlist_bl_for_each_entry_safe(icntnr, pos, tmp, hbl, plink)
+ iput(&icntnr->vfs_inode);
+ INIT_HLIST_BL_HEAD(hbl);
+ }
+}
+
+void au_plink_clean(struct super_block *sb, int verbose)
+{
+ struct dentry *root;
+
+ root = sb->s_root;
+ aufs_write_lock(root);
+ if (au_opt_test(au_mntflags(sb), PLINK))
+ au_plink_put(sb, verbose);
+ aufs_write_unlock(root);
+}
+
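+/*
+ * Roughly: detach the lower inode that belongs to the branch @br_id from
+ * @inode.  The return value (do_put) is 1 when no lower inode remains
+ * afterwards (or when @inode carried none to begin with), i.e. when the
+ * pseudo-link entry itself can be dropped by the caller.
+ */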
+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id)
+{
+ int do_put;
+ aufs_bindex_t btop, bbot, bindex;
+
+ do_put = 0;
+ btop = au_ibtop(inode);
+ bbot = au_ibbot(inode);
+ if (btop >= 0) {
+ for (bindex = btop; bindex <= bbot; bindex++) {
+ if (!au_h_iptr(inode, bindex)
+ || au_ii_br_id(inode, bindex) != br_id)
+ continue;
+ au_set_h_iptr(inode, bindex, NULL, 0);
+ do_put = 1;
+ break;
+ }
+ if (do_put)
+ for (bindex = btop; bindex <= bbot; bindex++)
+ if (au_h_iptr(inode, bindex)) {
+ do_put = 0;
+ break;
+ }
+ } else
+ do_put = 1;
+
+ return do_put;
+}
+
+/* free the plinks on a branch specified by @br_id */
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id)
+{
+ struct au_sbinfo *sbinfo;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos, *tmp;
+ struct au_icntnr *icntnr;
+ struct inode *inode;
+ int i, do_put;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ /* no bit_lock since sbinfo is write-locked */
+ for (i = 0; i < AuPlink_NHASH; i++) {
+ hbl = sbinfo->si_plink + i;
+ hlist_bl_for_each_entry_safe(icntnr, pos, tmp, hbl, plink) {
+ inode = au_igrab(&icntnr->vfs_inode);
+ ii_write_lock_child(inode);
+ do_put = au_plink_do_half_refresh(inode, br_id);
+ if (do_put) {
+ hlist_bl_del(&icntnr->plink);
+ iput(inode);
+ }
+ ii_write_unlock(inode);
+ iput(inode);
+ }
+ }
+}
diff --git a/fs/aufs/poll.c b/fs/aufs/poll.c
new file mode 100644
index 000000000000..c48c6cb6491f
--- /dev/null
+++ b/fs/aufs/poll.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * poll operation
+ * Currently, only one filesystem implements the ->poll operation.
+ */
+
+#include "aufs.h"
+
+__poll_t aufs_poll(struct file *file, struct poll_table_struct *pt)
+{
+ __poll_t mask;
+ struct file *h_file;
+ struct super_block *sb;
+
+ /* default to an error; replaced by the branch's ->poll on success */
+ mask = EPOLLERR /* | EPOLLIN | EPOLLOUT */;
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+ if (IS_ERR(h_file)) {
+ AuDbg("h_file %ld\n", PTR_ERR(h_file));
+ goto out;
+ }
+
+ mask = vfs_poll(h_file, pt);
+ fput(h_file); /* instead of au_read_post() */
+
+out:
+ si_read_unlock(sb);
+ if (mask & EPOLLERR)
+ AuDbg("mask 0x%x\n", mask);
+ return mask;
+}
diff --git a/fs/aufs/posix_acl.c b/fs/aufs/posix_acl.c
new file mode 100644
index 000000000000..67a4bbffdf5f
--- /dev/null
+++ b/fs/aufs/posix_acl.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * posix acl operations
+ */
+
+#include <linux/fs.h>
+#include "aufs.h"
+
+struct posix_acl *aufs_get_acl(struct inode *inode, int type)
+{
+ struct posix_acl *acl;
+ int err;
+ aufs_bindex_t bindex;
+ struct inode *h_inode;
+ struct super_block *sb;
+
+ acl = NULL;
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ ii_read_lock_child(inode);
+ if (!(sb->s_flags & SB_POSIXACL))
+ goto out;
+
+ bindex = au_ibtop(inode);
+ h_inode = au_h_iptr(inode, bindex);
+ if (unlikely(!h_inode
+ || ((h_inode->i_mode & S_IFMT)
+ != (inode->i_mode & S_IFMT)))) {
+ err = au_busy_or_stale();
+ acl = ERR_PTR(err);
+ goto out;
+ }
+
+ /* always topmost only */
+ acl = get_acl(h_inode, type);
+ if (!IS_ERR_OR_NULL(acl))
+ set_cached_acl(inode, type, acl);
+
+out:
+ ii_read_unlock(inode);
+ si_read_unlock(sb);
+
+ AuTraceErrPtr(acl);
+ return acl;
+}
+
+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ int err;
+ ssize_t ssz;
+ struct dentry *dentry;
+ struct au_sxattr arg = {
+ .type = AU_ACL_SET,
+ .u.acl_set = {
+ .acl = acl,
+ .type = type
+ },
+ };
+
+ IMustLock(inode);
+
+ if (inode->i_ino == AUFS_ROOT_INO)
+ dentry = dget(inode->i_sb->s_root);
+ else {
+ dentry = d_find_alias(inode);
+ if (!dentry)
+ dentry = d_find_any_alias(inode);
+ if (!dentry) {
+ pr_warn("cannot handle this inode, "
+ "please report to aufs-users ML\n");
+ err = -ENOENT;
+ goto out;
+ }
+ }
+
+ ssz = au_sxattr(dentry, inode, &arg);
+ dput(dentry);
+ err = ssz;
+ if (ssz >= 0) {
+ err = 0;
+ set_cached_acl(inode, type, acl);
+ }
+
+out:
+ return err;
+}
diff --git a/fs/aufs/procfs.c b/fs/aufs/procfs.c
new file mode 100644
index 000000000000..29c6377e888c
--- /dev/null
+++ b/fs/aufs/procfs.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * procfs interfaces
+ */
+
+#include <linux/proc_fs.h>
+#include "aufs.h"
+
+static int au_procfs_plm_release(struct inode *inode, struct file *file)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = file->private_data;
+ if (sbinfo) {
+ au_plink_maint_leave(sbinfo);
+ kobject_put(&sbinfo->si_kobj);
+ }
+
+ return 0;
+}
+
+static void au_procfs_plm_write_clean(struct file *file)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = file->private_data;
+ if (sbinfo)
+ au_plink_clean(sbinfo->si_sb, /*verbose*/0);
+}
+
+static int au_procfs_plm_write_si(struct file *file, unsigned long id)
+{
+ int err;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+ struct hlist_bl_node *pos;
+
+ err = -EBUSY;
+ if (unlikely(file->private_data))
+ goto out;
+
+ sb = NULL;
+ /* don't use au_sbilist_lock() here */
+ hlist_bl_lock(&au_sbilist);
+ hlist_bl_for_each_entry(sbinfo, pos, &au_sbilist, si_list)
+ if (id == sysaufs_si_id(sbinfo)) {
+ kobject_get(&sbinfo->si_kobj);
+ sb = sbinfo->si_sb;
+ break;
+ }
+ hlist_bl_unlock(&au_sbilist);
+
+ err = -EINVAL;
+ if (unlikely(!sb))
+ goto out;
+
+ err = au_plink_maint_enter(sb);
+ if (!err)
+ /* keep kobject_get() */
+ file->private_data = sbinfo;
+ else
+ kobject_put(&sbinfo->si_kobj);
+out:
+ return err;
+}
+
+/*
+ * Accept a valid "si=xxxx" only.
+ * Once it is accepted successfully, accept "clean" too.
+ */
+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t err;
+ unsigned long id;
+ /* "si=" + hex id + an optional last newline, plus the terminating NUL */
+ char buf[3 + sizeof(unsigned long) * 2 + 1 + 1];
+
+ err = -EACCES;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = -EINVAL;
+ if (unlikely(count >= sizeof(buf)))
+ goto out;
+
+ err = copy_from_user(buf, ubuf, count);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ goto out;
+ }
+ buf[count] = 0;
+
+ err = -EINVAL;
+ if (!strcmp("clean", buf)) {
+ au_procfs_plm_write_clean(file);
+ goto out_success;
+ } else if (unlikely(strncmp("si=", buf, 3)))
+ goto out;
+
+ err = kstrtoul(buf + 3, 16, &id);
+ if (unlikely(err))
+ goto out;
+
+ err = au_procfs_plm_write_si(file, id);
+ if (unlikely(err))
+ goto out;
+
+out_success:
+ err = count; /* success */
+out:
+ return err;
+}
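+
+/*
+ * An illustrative sketch of how this interface is expected to be driven
+ * from userspace (an external helper such as aufs-util is the intended
+ * consumer; the proc path is built from AUFS_PLINK_MAINT_DIR and
+ * AUFS_PLINK_MAINT_NAME below, and the write requires CAP_SYS_ADMIN):
+ *	fd = open("/proc/<AUFS_PLINK_MAINT_DIR>/<AUFS_PLINK_MAINT_NAME>",
+ *		  O_WRONLY);
+ *	write(fd, "si=<hex id from the si= mount option>", ...);
+ *		enters plink-maintenance mode; keep fd open to stay in it
+ *	write(fd, "clean", 5);
+ *		optionally flushes the recorded pseudo-links
+ *	close(fd);
+ *		leaves plink-maintenance mode (au_procfs_plm_release())
+ */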
+
+static const struct file_operations au_procfs_plm_fop = {
+ .write = au_procfs_plm_write,
+ .release = au_procfs_plm_release,
+ .owner = THIS_MODULE
+};
+
+/* ---------------------------------------------------------------------- */
+
+static struct proc_dir_entry *au_procfs_dir;
+
+void au_procfs_fin(void)
+{
+ remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir);
+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+}
+
+int __init au_procfs_init(void)
+{
+ int err;
+ struct proc_dir_entry *entry;
+
+ err = -ENOMEM;
+ au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL);
+ if (unlikely(!au_procfs_dir))
+ goto out;
+
+ entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | 0200,
+ au_procfs_dir, &au_procfs_plm_fop);
+ if (unlikely(!entry))
+ goto out_dir;
+
+ err = 0;
+ goto out; /* success */
+
+out_dir:
+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+out:
+ return err;
+}
diff --git a/fs/aufs/rdu.c b/fs/aufs/rdu.c
new file mode 100644
index 000000000000..bf0233a2887d
--- /dev/null
+++ b/fs/aufs/rdu.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * readdir in userspace.
+ */
+
+#include <linux/compat.h>
+#include <linux/fs_stack.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+/* bits for struct aufs_rdu.flags */
+#define AuRdu_CALLED 1
+#define AuRdu_CONT (1 << 1)
+#define AuRdu_FULL (1 << 2)
+#define au_ftest_rdu(flags, name) ((flags) & AuRdu_##name)
+#define au_fset_rdu(flags, name) \
+ do { (flags) |= AuRdu_##name; } while (0)
+#define au_fclr_rdu(flags, name) \
+ do { (flags) &= ~AuRdu_##name; } while (0)
+
+struct au_rdu_arg {
+ struct dir_context ctx;
+ struct aufs_rdu *rdu;
+ union au_rdu_ent_ul ent;
+ unsigned long end;
+
+ struct super_block *sb;
+ int err;
+};
+
+static int au_rdu_fill(struct dir_context *ctx, const char *name, int nlen,
+ loff_t offset, u64 h_ino, unsigned int d_type)
+{
+ int err, len;
+ struct au_rdu_arg *arg = container_of(ctx, struct au_rdu_arg, ctx);
+ struct aufs_rdu *rdu = arg->rdu;
+ struct au_rdu_ent ent;
+
+ err = 0;
+ arg->err = 0;
+ au_fset_rdu(rdu->cookie.flags, CALLED);
+ len = au_rdu_len(nlen);
+ if (arg->ent.ul + len < arg->end) {
+ ent.ino = h_ino;
+ ent.bindex = rdu->cookie.bindex;
+ ent.type = d_type;
+ ent.nlen = nlen;
+ if (unlikely(nlen > AUFS_MAX_NAMELEN))
+ ent.type = DT_UNKNOWN;
+
+ /* no need to care about mmap_sem here since this is a dir */
+ err = -EFAULT;
+ if (copy_to_user(arg->ent.e, &ent, sizeof(ent)))
+ goto out;
+ if (copy_to_user(arg->ent.e->name, name, nlen))
+ goto out;
+ /* the terminating NUL */
+ if (__put_user(0, arg->ent.e->name + nlen))
+ goto out;
+ err = 0;
+ /* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */
+ arg->ent.ul += len;
+ rdu->rent++;
+ } else {
+ err = -EFAULT;
+ au_fset_rdu(rdu->cookie.flags, FULL);
+ rdu->full = 1;
+ rdu->tail = arg->ent;
+ }
+
+out:
+ /* AuTraceErr(err); */
+ return err;
+}
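+
+/*
+ * A note on the user-buffer layout produced above: each record is a
+ * struct au_rdu_ent immediately followed by the entry name and a
+ * terminating NUL, and the next record starts au_rdu_len(nlen) bytes
+ * after the current one (au_rdu_len() comes from the aufs uapi header).
+ */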
+
+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg)
+{
+ int err;
+ loff_t offset;
+ struct au_rdu_cookie *cookie = &arg->rdu->cookie;
+
+ /* we don't have to care about (FMODE_32BITHASH | FMODE_64BITHASH) for ext4 */
+ offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET);
+ err = offset;
+ if (unlikely(offset != cookie->h_pos))
+ goto out;
+
+ err = 0;
+ do {
+ arg->err = 0;
+ au_fclr_rdu(cookie->flags, CALLED);
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(h_file, &arg->ctx);
+ if (err >= 0)
+ err = arg->err;
+ } while (!err
+ && au_ftest_rdu(cookie->flags, CALLED)
+ && !au_ftest_rdu(cookie->flags, FULL));
+ cookie->h_pos = h_file->f_pos;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_rdu(struct file *file, struct aufs_rdu *rdu)
+{
+ int err;
+ aufs_bindex_t bbot;
+ struct au_rdu_arg arg = {
+ .ctx = {
+ .actor = au_rdu_fill
+ }
+ };
+ struct dentry *dentry;
+ struct inode *inode;
+ struct file *h_file;
+ struct au_rdu_cookie *cookie = &rdu->cookie;
+
+ err = !access_ok(VERIFY_WRITE, rdu->ent.e, rdu->sz);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+ rdu->rent = 0;
+ rdu->tail = rdu->ent;
+ rdu->full = 0;
+ arg.rdu = rdu;
+ arg.ent = rdu->ent;
+ arg.end = arg.ent.ul;
+ arg.end += rdu->sz;
+
+ err = -ENOTDIR;
+ if (unlikely(!file->f_op->iterate && !file->f_op->iterate_shared))
+ goto out;
+
+ err = security_file_permission(file, MAY_READ);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out;
+
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+ inode_lock_shared(inode);
+
+ arg.sb = inode->i_sb;
+ err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_mtx;
+ err = au_alive_dir(dentry);
+ if (unlikely(err))
+ goto out_si;
+ /* todo: reval? */
+ fi_read_lock(file);
+
+ err = -EAGAIN;
+ if (unlikely(au_ftest_rdu(cookie->flags, CONT)
+ && cookie->generation != au_figen(file)))
+ goto out_unlock;
+
+ err = 0;
+ if (!rdu->blk) {
+ rdu->blk = au_sbi(arg.sb)->si_rdblk;
+ if (!rdu->blk)
+ rdu->blk = au_dir_size(file, /*dentry*/NULL);
+ }
+ bbot = au_fbtop(file);
+ if (cookie->bindex < bbot)
+ cookie->bindex = bbot;
+ bbot = au_fbbot_dir(file);
+ /* AuDbg("b%d, b%d\n", cookie->bindex, bbot); */
+ for (; !err && cookie->bindex <= bbot;
+ cookie->bindex++, cookie->h_pos = 0) {
+ h_file = au_hf_dir(file, cookie->bindex);
+ if (!h_file)
+ continue;
+
+ au_fclr_rdu(cookie->flags, FULL);
+ err = au_rdu_do(h_file, &arg);
+ AuTraceErr(err);
+ if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err))
+ break;
+ }
+ AuDbg("rent %llu\n", rdu->rent);
+
+ if (!err && !au_ftest_rdu(cookie->flags, CONT)) {
+ rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH);
+ au_fset_rdu(cookie->flags, CONT);
+ cookie->generation = au_figen(file);
+ }
+
+ ii_read_lock_child(inode);
+ fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibtop(inode)));
+ ii_read_unlock(inode);
+
+out_unlock:
+ fi_read_unlock(file);
+out_si:
+ si_read_unlock(arg.sb);
+out_mtx:
+ inode_unlock_shared(inode);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu)
+{
+ int err;
+ ino_t ino;
+ unsigned long long nent;
+ union au_rdu_ent_ul *u;
+ struct au_rdu_ent ent;
+ struct super_block *sb;
+
+ err = 0;
+ nent = rdu->nent;
+ u = &rdu->ent;
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ while (nent-- > 0) {
+ /* no need to care about mmap_sem here since this is a dir */
+ err = copy_from_user(&ent, u->e, sizeof(ent));
+ if (!err)
+ err = !access_ok(VERIFY_WRITE, &u->e->ino, sizeof(ino));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ break;
+ }
+
+ /* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */
+ if (!ent.wh)
+ err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino);
+ else
+ err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type,
+ &ino);
+ if (unlikely(err)) {
+ AuTraceErr(err);
+ break;
+ }
+
+ err = __put_user(ino, &u->e->ino);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ break;
+ }
+ u->ul += au_rdu_len(ent.nlen);
+ }
+ si_read_unlock(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_rdu_verify(struct aufs_rdu *rdu)
+{
+ AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | "
+ "%llu, b%d, 0x%x, g%u}\n",
+ rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ],
+ rdu->blk,
+ rdu->rent, rdu->shwh, rdu->full,
+ rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags,
+ rdu->cookie.generation);
+
+ if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu))
+ return 0;
+
+ AuDbg("%u:%u\n",
+ rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu));
+ return -EINVAL;
+}
+
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err, e;
+ struct aufs_rdu rdu;
+ void __user *p = (void __user *)arg;
+
+ err = copy_from_user(&rdu, p, sizeof(rdu));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+ err = au_rdu_verify(&rdu);
+ if (unlikely(err))
+ goto out;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ err = au_rdu(file, &rdu);
+ if (unlikely(err))
+ break;
+
+ e = copy_to_user(p, &rdu, sizeof(rdu));
+ if (unlikely(e)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ }
+ break;
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_ino(file, &rdu);
+ break;
+
+ default:
+ /* err = -ENOTTY; */
+ err = -EINVAL;
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
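+
+/*
+ * An illustrative userspace sketch (not part of aufs itself; struct
+ * aufs_rdu and the AUFS_CTL_* numbers come from the aufs uapi header):
+ *	struct aufs_rdu rdu = { .sz = bufsz, .ent.e = buf };
+ *	rdu.verify[AufsCtlRduV_SZ] = sizeof(rdu);
+ *	do {
+ *		err = ioctl(dirfd, AUFS_CTL_RDU, &rdu);
+ *		consume the rdu.rent records packed into buf ...
+ *	} while (!err && rdu.full);
+ * rdu.full means the buffer filled up; the cookie inside rdu keeps the
+ * resume position, so the same rdu must be passed back to continue.
+ * Afterwards, with rdu.nent set to the number of records in buf,
+ * AUFS_CTL_RDU_INO rewrites each record's ino to the aufs inode number.
+ */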
+
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err, e;
+ struct aufs_rdu rdu;
+ void __user *p = compat_ptr(arg);
+
+ /* todo: get_user()? */
+ err = copy_from_user(&rdu, p, sizeof(rdu));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+ rdu.ent.e = compat_ptr(rdu.ent.ul);
+ err = au_rdu_verify(&rdu);
+ if (unlikely(err))
+ goto out;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ err = au_rdu(file, &rdu);
+ if (unlikely(err))
+ break;
+
+ rdu.ent.ul = ptr_to_compat(rdu.ent.e);
+ rdu.tail.ul = ptr_to_compat(rdu.tail.e);
+ e = copy_to_user(p, &rdu, sizeof(rdu));
+ if (unlikely(e)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ }
+ break;
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_ino(file, &rdu);
+ break;
+
+ default:
+ /* err = -ENOTTY; */
+ err = -EINVAL;
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+#endif
diff --git a/fs/aufs/rwsem.h b/fs/aufs/rwsem.h
new file mode 100644
index 000000000000..2f8988fa1bda
--- /dev/null
+++ b/fs/aufs/rwsem.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * simple read-write semaphore wrappers
+ */
+
+#ifndef __AUFS_RWSEM_H__
+#define __AUFS_RWSEM_H__
+
+#ifdef __KERNEL__
+
+#include "debug.h"
+
+/* in the future, the name 'au_rwsem' will be totally gone */
+#define au_rwsem rw_semaphore
+
+/* for easier debugging, do not make these inline functions */
+#define AuRwMustNoWaiters(rw) AuDebugOn(rwsem_is_contended(rw))
+/* rwsem_is_locked() is unusable */
+#define AuRwMustReadLock(rw) AuDebugOn(!lockdep_recursing(current) \
+ && debug_locks \
+ && !lockdep_is_held_type(rw, 1))
+#define AuRwMustWriteLock(rw) AuDebugOn(!lockdep_recursing(current) \
+ && debug_locks \
+ && !lockdep_is_held_type(rw, 0))
+#define AuRwMustAnyLock(rw) AuDebugOn(!lockdep_recursing(current) \
+ && debug_locks \
+ && !lockdep_is_held(rw))
+#define AuRwDestroy(rw) AuDebugOn(!lockdep_recursing(current) \
+ && debug_locks \
+ && lockdep_is_held(rw))
+
+#define au_rw_init(rw) init_rwsem(rw)
+
+#define au_rw_init_wlock(rw) do { \
+ au_rw_init(rw); \
+ down_write(rw); \
+ } while (0)
+
+#define au_rw_init_wlock_nested(rw, lsc) do { \
+ au_rw_init(rw); \
+ down_write_nested(rw, lsc); \
+ } while (0)
+
+#define au_rw_read_lock(rw) down_read(rw)
+#define au_rw_read_lock_nested(rw, lsc) down_read_nested(rw, lsc)
+#define au_rw_read_unlock(rw) up_read(rw)
+#define au_rw_dgrade_lock(rw) downgrade_write(rw)
+#define au_rw_write_lock(rw) down_write(rw)
+#define au_rw_write_lock_nested(rw, lsc) down_write_nested(rw, lsc)
+#define au_rw_write_unlock(rw) up_write(rw)
+/* why is the _nested version not defined? */
+#define au_rw_read_trylock(rw) down_read_trylock(rw)
+#define au_rw_write_trylock(rw) down_write_trylock(rw)
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_RWSEM_H__ */
diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c
new file mode 100644
index 000000000000..803f6b11969a
--- /dev/null
+++ b/fs/aufs/sbinfo.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * superblock private data
+ */
+
+#include "aufs.h"
+
+/*
+ * these are necessary regardless of whether sysfs is enabled or disabled.
+ */
+void au_si_free(struct kobject *kobj)
+{
+ int i;
+ struct au_sbinfo *sbinfo;
+ char *locked __maybe_unused; /* debug only */
+
+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+ for (i = 0; i < AuPlink_NHASH; i++)
+ AuDebugOn(!hlist_bl_empty(sbinfo->si_plink + i));
+ AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len));
+
+ AuLCntZero(au_lcnt_read(&sbinfo->si_ninodes, /*do_rev*/0));
+ au_lcnt_fin(&sbinfo->si_ninodes, /*do_sync*/0);
+ AuLCntZero(au_lcnt_read(&sbinfo->si_nfiles, /*do_rev*/0));
+ au_lcnt_fin(&sbinfo->si_nfiles, /*do_sync*/0);
+
+ dbgaufs_si_fin(sbinfo);
+ au_rw_write_lock(&sbinfo->si_rwsem);
+ au_br_free(sbinfo);
+ au_rw_write_unlock(&sbinfo->si_rwsem);
+
+ au_kfree_try_rcu(sbinfo->si_branch);
+ mutex_destroy(&sbinfo->si_xib_mtx);
+ AuRwDestroy(&sbinfo->si_rwsem);
+
+ au_lcnt_wait_for_fin(&sbinfo->si_ninodes);
+ /* si_nfiles is waited too */
+ au_kfree_rcu(sbinfo);
+}
+
+int au_si_alloc(struct super_block *sb)
+{
+ int err, i;
+ struct au_sbinfo *sbinfo;
+
+ err = -ENOMEM;
+ sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
+ if (unlikely(!sbinfo))
+ goto out;
+
+ /* will be reallocated separately */
+ sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
+ if (unlikely(!sbinfo->si_branch))
+ goto out_sbinfo;
+
+ err = sysaufs_si_init(sbinfo);
+ if (!err) {
+ dbgaufs_si_null(sbinfo);
+ err = dbgaufs_si_init(sbinfo);
+ if (unlikely(err))
+ kobject_put(&sbinfo->si_kobj);
+ }
+ if (unlikely(err))
+ goto out_br;
+
+ au_nwt_init(&sbinfo->si_nowait);
+ au_rw_init_wlock(&sbinfo->si_rwsem);
+
+ au_lcnt_init(&sbinfo->si_ninodes, /*release*/NULL);
+ au_lcnt_init(&sbinfo->si_nfiles, /*release*/NULL);
+
+ sbinfo->si_bbot = -1;
+ sbinfo->si_last_br_id = AUFS_BRANCH_MAX / 2;
+
+ sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
+ sbinfo->si_wbr_create = AuWbrCreate_Def;
+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;
+
+ au_fhsm_init(sbinfo);
+
+ sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);
+
+ sbinfo->si_xino_jiffy = jiffies;
+ sbinfo->si_xino_expire
+ = msecs_to_jiffies(AUFS_XINO_DEF_SEC * MSEC_PER_SEC);
+ mutex_init(&sbinfo->si_xib_mtx);
+ /* leave si_xib_last_pindex and si_xib_next_bit */
+
+ INIT_HLIST_BL_HEAD(&sbinfo->si_aopen);
+
+ sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
+ sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+ sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+ sbinfo->si_dirwh = AUFS_DIRWH_DEF;
+
+ for (i = 0; i < AuPlink_NHASH; i++)
+ INIT_HLIST_BL_HEAD(sbinfo->si_plink + i);
+ init_waitqueue_head(&sbinfo->si_plink_wq);
+ spin_lock_init(&sbinfo->si_plink_maint_lock);
+
+ INIT_HLIST_BL_HEAD(&sbinfo->si_files);
+
+ /* with getattr by default */
+ sbinfo->si_iop_array = aufs_iop;
+
+ /* leave other members for sysaufs and si_mnt. */
+ sbinfo->si_sb = sb;
+ sb->s_fs_info = sbinfo;
+ si_pid_set(sb);
+ return 0; /* success */
+
+out_br:
+ au_kfree_try_rcu(sbinfo->si_branch);
+out_sbinfo:
+ au_kfree_rcu(sbinfo);
+out:
+ return err;
+}
+
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr, int may_shrink)
+{
+ int err, sz;
+ struct au_branch **brp;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ err = -ENOMEM;
+ sz = sizeof(*brp) * (sbinfo->si_bbot + 1);
+ if (unlikely(!sz))
+ sz = sizeof(*brp);
+ brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS,
+ may_shrink);
+ if (brp) {
+ sbinfo->si_branch = brp;
+ err = 0;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_sigen_inc(struct super_block *sb)
+{
+ unsigned int gen;
+ struct inode *inode;
+
+ SiMustWriteLock(sb);
+
+ gen = ++au_sbi(sb)->si_generation;
+ au_update_digen(sb->s_root);
+ inode = d_inode(sb->s_root);
+ au_update_iigen(inode, /*half*/0);
+ inode_inc_iversion(inode);
+ return gen;
+}
+
+aufs_bindex_t au_new_br_id(struct super_block *sb)
+{
+ aufs_bindex_t br_id;
+ int i;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ for (i = 0; i <= AUFS_BRANCH_MAX; i++) {
+ br_id = ++sbinfo->si_last_br_id;
+ AuDebugOn(br_id < 0);
+ if (br_id && au_br_index(sb, br_id) < 0)
+ return br_id;
+ }
+
+ return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* it is ok that new 'nwt' tasks are appended while we are sleeping */
+int si_read_lock(struct super_block *sb, int flags)
+{
+ int err;
+
+ err = 0;
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+ si_noflush_read_lock(sb);
+ err = au_plink_maint(sb, flags);
+ if (unlikely(err))
+ si_read_unlock(sb);
+
+ return err;
+}
+
+int si_write_lock(struct super_block *sb, int flags)
+{
+ int err;
+
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+ si_noflush_write_lock(sb);
+ err = au_plink_maint(sb, flags);
+ if (unlikely(err))
+ si_write_unlock(sb);
+
+ return err;
+}
+
+/* dentry and super_block lock. call at entry point */
+int aufs_read_lock(struct dentry *dentry, int flags)
+{
+ int err;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, flags);
+ if (unlikely(err))
+ goto out;
+
+ if (au_ftest_lock(flags, DW))
+ di_write_lock_child(dentry);
+ else
+ di_read_lock_child(dentry, flags);
+
+ if (au_ftest_lock(flags, GEN)) {
+ err = au_digen_test(dentry, au_sigen(sb));
+ if (!au_opt_test(au_mntflags(sb), UDBA_NONE))
+ AuDebugOn(!err && au_dbrange_test(dentry));
+ else if (!err)
+ err = au_dbrange_test(dentry);
+ if (unlikely(err))
+ aufs_read_unlock(dentry, flags);
+ }
+
+out:
+ return err;
+}
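+
+/*
+ * A typical entry-point pattern (a sketch only; real callers pick the
+ * AuLock_* flag set they actually need):
+ *	err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_IR);
+ *	if (unlikely(err))
+ *		return err;
+ *	... operate on the branches ...
+ *	aufs_read_unlock(dentry, AuLock_IR);
+ */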
+
+void aufs_read_unlock(struct dentry *dentry, int flags)
+{
+ if (au_ftest_lock(flags, DW))
+ di_write_unlock(dentry);
+ else
+ di_read_unlock(dentry, flags);
+ si_read_unlock(dentry->d_sb);
+}
+
+void aufs_write_lock(struct dentry *dentry)
+{
+ si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW);
+ di_write_lock_child(dentry);
+}
+
+void aufs_write_unlock(struct dentry *dentry)
+{
+ di_write_unlock(dentry);
+ si_write_unlock(dentry->d_sb);
+}
+
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags)
+{
+ int err;
+ unsigned int sigen;
+ struct super_block *sb;
+
+ sb = d1->d_sb;
+ err = si_read_lock(sb, flags);
+ if (unlikely(err))
+ goto out;
+
+ di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIRS));
+
+ if (au_ftest_lock(flags, GEN)) {
+ sigen = au_sigen(sb);
+ err = au_digen_test(d1, sigen);
+ AuDebugOn(!err && au_dbrange_test(d1));
+ if (!err) {
+ err = au_digen_test(d2, sigen);
+ AuDebugOn(!err && au_dbrange_test(d2));
+ }
+ if (unlikely(err))
+ aufs_read_and_write_unlock2(d1, d2);
+ }
+
+out:
+ return err;
+}
+
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+ di_write_unlock2(d1, d2);
+ si_read_unlock(d1->d_sb);
+}
diff --git a/fs/aufs/super.c b/fs/aufs/super.c
new file mode 100644
index 000000000000..e7570b137270
--- /dev/null
+++ b/fs/aufs/super.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * mount and super_block operations
+ */
+
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/vmalloc.h>
+#include "aufs.h"
+
+/*
+ * super_operations
+ */
+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused)
+{
+ struct au_icntnr *c;
+
+ c = au_cache_alloc_icntnr();
+ if (c) {
+ au_icntnr_init(c);
+ inode_set_iversion(&c->vfs_inode, 1); /* sigen(sb); */
+ c->iinfo.ii_hinode = NULL;
+ return &c->vfs_inode;
+ }
+ return NULL;
+}
+
+static void aufs_destroy_inode_cb(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode));
+}
+
+static void aufs_destroy_inode(struct inode *inode)
+{
+ if (!au_is_bad_inode(inode))
+ au_iinfo_fin(inode);
+ call_rcu(&inode->i_rcu, aufs_destroy_inode_cb);
+}
+
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino)
+{
+ struct inode *inode;
+ int err;
+
+ inode = iget_locked(sb, ino);
+ if (unlikely(!inode)) {
+ inode = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ if (!(inode->i_state & I_NEW))
+ goto out;
+
+ err = au_xigen_new(inode);
+ if (!err)
+ err = au_iinfo_init(inode);
+ if (!err)
+ inode_inc_iversion(inode);
+ else {
+ iget_failed(inode);
+ inode = ERR_PTR(err);
+ }
+
+out:
+ /* never return NULL */
+ AuDebugOn(!inode);
+ AuTraceErrPtr(inode);
+ return inode;
+}
+
+/* lock free root dinfo */
+static int au_show_brs(struct seq_file *seq, struct super_block *sb)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ struct path path;
+ struct au_hdentry *hdp;
+ struct au_branch *br;
+ au_br_perm_str_t perm;
+
+ err = 0;
+ bbot = au_sbbot(sb);
+ bindex = 0;
+ hdp = au_hdentry(au_di(sb->s_root), bindex);
+ for (; !err && bindex <= bbot; bindex++, hdp++) {
+ br = au_sbr(sb, bindex);
+ path.mnt = au_br_mnt(br);
+ path.dentry = hdp->hd_dentry;
+ err = au_seq_path(seq, &path);
+ if (!err) {
+ au_optstr_br_perm(&perm, br->br_perm);
+ seq_printf(seq, "=%s", perm.a);
+ if (bindex != bbot)
+ seq_putc(seq, ':');
+ }
+ }
+ if (unlikely(err || seq_has_overflowed(seq)))
+ err = -E2BIG;
+
+ return err;
+}
+
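+/*
+ * Build a printf format from an option pattern: everything up to and
+ * including the first ':' is copied, then @append is attached.  For
+ * example (illustrative only), a pattern beginning with "mfsrr:" and
+ * @append "%llu" yield the format "mfsrr:%llu".
+ */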
+static void au_gen_fmt(char *fmt, int len __maybe_unused, const char *pat,
+ const char *append)
+{
+ char *p;
+
+ p = fmt;
+ while (*pat != ':')
+ *p++ = *pat++;
+ *p++ = *pat++;
+ strcpy(p, append);
+ AuDebugOn(strlen(fmt) >= len);
+}
+
+static void au_show_wbr_create(struct seq_file *m, int v,
+ struct au_sbinfo *sbinfo)
+{
+ const char *pat;
+ char fmt[32];
+ struct au_wbr_mfs *mfs;
+
+ AuRwMustAnyLock(&sbinfo->si_rwsem);
+
+ seq_puts(m, ",create=");
+ pat = au_optstr_wbr_create(v);
+ mfs = &sbinfo->si_wbr_mfs;
+ switch (v) {
+ case AuWbrCreate_TDP:
+ case AuWbrCreate_RR:
+ case AuWbrCreate_MFS:
+ case AuWbrCreate_PMFS:
+ seq_puts(m, pat);
+ break;
+ case AuWbrCreate_MFSRR:
+ case AuWbrCreate_TDMFS:
+ case AuWbrCreate_PMFSRR:
+ au_gen_fmt(fmt, sizeof(fmt), pat, "%llu");
+ seq_printf(m, fmt, mfs->mfsrr_watermark);
+ break;
+ case AuWbrCreate_MFSV:
+ case AuWbrCreate_PMFSV:
+ au_gen_fmt(fmt, sizeof(fmt), pat, "%lu");
+ seq_printf(m, fmt,
+ jiffies_to_msecs(mfs->mfs_expire)
+ / MSEC_PER_SEC);
+ break;
+ case AuWbrCreate_MFSRRV:
+ case AuWbrCreate_TDMFSV:
+ case AuWbrCreate_PMFSRRV:
+ au_gen_fmt(fmt, sizeof(fmt), pat, "%llu:%lu");
+ seq_printf(m, fmt, mfs->mfsrr_watermark,
+ jiffies_to_msecs(mfs->mfs_expire) / MSEC_PER_SEC);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int au_show_xino(struct seq_file *seq, struct super_block *sb)
+{
+#ifdef CONFIG_SYSFS
+ return 0;
+#else
+ int err;
+ const int len = sizeof(AUFS_XINO_FNAME) - 1;
+ aufs_bindex_t bindex, brid;
+ struct qstr *name;
+ struct file *f;
+ struct dentry *d, *h_root;
+ struct au_branch *br;
+
+ AuRwMustAnyLock(&au_sbi(sb)->si_rwsem);
+
+ err = 0;
+ f = au_sbi(sb)->si_xib;
+ if (!f)
+ goto out;
+
+ /* stop printing the default xino path on the first writable branch */
+ h_root = NULL;
+ bindex = au_xi_root(sb, f->f_path.dentry);
+ if (bindex >= 0) {
+ br = au_sbr(sb, bindex);
+ h_root = au_br_dentry(br);
+ }
+
+ d = f->f_path.dentry;
+ name = &d->d_name;
+ /* safe ->d_parent because the file is unlinked */
+ if (d->d_parent == h_root
+ && name->len == len
+ && !memcmp(name->name, AUFS_XINO_FNAME, len))
+ goto out;
+
+ seq_puts(seq, ",xino=");
+ err = au_xino_path(seq, f);
+
+out:
+ return err;
+#endif
+}
+
+/* seq_file will re-call me in case of too long string */
+static int aufs_show_options(struct seq_file *m, struct dentry *dentry)
+{
+ int err;
+ unsigned int mnt_flags, v;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+#define AuBool(name, str) do { \
+ v = au_opt_test(mnt_flags, name); \
+ if (v != au_opt_test(AuOpt_Def, name)) \
+ seq_printf(m, ",%s" #str, v ? "" : "no"); \
+} while (0)
+
+#define AuStr(name, str) do { \
+ v = mnt_flags & AuOptMask_##name; \
+ if (v != (AuOpt_Def & AuOptMask_##name)) \
+ seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \
+} while (0)
+
+#define AuUInt(name, str, val) do { \
+ if (val != AUFS_##name##_DEF) \
+ seq_printf(m, "," #str "=%u", val); \
+} while (0)
+
+ sb = dentry->d_sb;
+ if (sb->s_flags & SB_POSIXACL)
+ seq_puts(m, ",acl");
+#if 0
+ if (sb->s_flags & SB_I_VERSION)
+ seq_puts(m, ",i_version");
+#endif
+
+ /* lock free root dinfo */
+ si_noflush_read_lock(sb);
+ sbinfo = au_sbi(sb);
+ seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo));
+
+ mnt_flags = au_mntflags(sb);
+ if (au_opt_test(mnt_flags, XINO)) {
+ err = au_show_xino(m, sb);
+ if (unlikely(err))
+ goto out;
+ } else
+ seq_puts(m, ",noxino");
+
+ AuBool(TRUNC_XINO, trunc_xino);
+ AuStr(UDBA, udba);
+ AuBool(SHWH, shwh);
+ AuBool(PLINK, plink);
+ AuBool(DIO, dio);
+ AuBool(DIRPERM1, dirperm1);
+
+ v = sbinfo->si_wbr_create;
+ if (v != AuWbrCreate_Def)
+ au_show_wbr_create(m, v, sbinfo);
+
+ v = sbinfo->si_wbr_copyup;
+ if (v != AuWbrCopyup_Def)
+ seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v));
+
+ v = au_opt_test(mnt_flags, ALWAYS_DIROPQ);
+ if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ))
+ seq_printf(m, ",diropq=%c", v ? 'a' : 'w');
+
+ AuUInt(DIRWH, dirwh, sbinfo->si_dirwh);
+
+ v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC;
+ AuUInt(RDCACHE, rdcache, v);
+
+ AuUInt(RDBLK, rdblk, sbinfo->si_rdblk);
+ AuUInt(RDHASH, rdhash, sbinfo->si_rdhash);
+
+ au_fhsm_show(m, sbinfo);
+
+ AuBool(DIRREN, dirren);
+ AuBool(SUM, sum);
+ /* AuBool(SUM_W, wsum); */
+ AuBool(WARN_PERM, warn_perm);
+ AuBool(VERBOSE, verbose);
+
+out:
+ /* be sure to print "br:" last */
+ if (!sysaufs_brs) {
+ seq_puts(m, ",br:");
+ au_show_brs(m, sb);
+ }
+ si_read_unlock(sb);
+ return 0;
+
+#undef AuBool
+#undef AuStr
+#undef AuUInt
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* sum mode which returns the summation for statfs(2) */
+
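+/*
+ * Saturating helpers: unsigned arithmetic wraps on overflow, so a result
+ * smaller than the original operand signals overflow and the value is
+ * clamped to ULLONG_MAX (e.g. ULLONG_MAX - 1 plus 3 wraps to 1, which is
+ * reported as ULLONG_MAX).
+ */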
+static u64 au_add_till_max(u64 a, u64 b)
+{
+ u64 old;
+
+ old = a;
+ a += b;
+ if (old <= a)
+ return a;
+ return ULLONG_MAX;
+}
+
+static u64 au_mul_till_max(u64 a, long mul)
+{
+ u64 old;
+
+ old = a;
+ a *= mul;
+ if (old <= a)
+ return a;
+ return ULLONG_MAX;
+}
+
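+/*
+ * Branches may use different block sizes, so the totals are normalized to
+ * the smallest f_bsize seen so far.  Illustrative example: a 4096-byte
+ * branch with 100 blocks followed by a 1024-byte branch with 400 blocks
+ * is reported as 800 blocks of 1024 bytes.
+ */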
+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
+{
+ int err;
+ long bsize, factor;
+ u64 blocks, bfree, bavail, files, ffree;
+ aufs_bindex_t bbot, bindex, i;
+ unsigned char shared;
+ struct path h_path;
+ struct super_block *h_sb;
+
+ err = 0;
+ bsize = LONG_MAX;
+ files = 0;
+ ffree = 0;
+ blocks = 0;
+ bfree = 0;
+ bavail = 0;
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ h_sb = h_path.mnt->mnt_sb;
+ shared = 0;
+ for (i = 0; !shared && i < bindex; i++)
+ shared = (au_sbr_sb(sb, i) == h_sb);
+ if (shared)
+ continue;
+
+ /* sb->s_root for NFS is unreliable */
+ h_path.dentry = h_path.mnt->mnt_root;
+ err = vfs_statfs(&h_path, buf);
+ if (unlikely(err))
+ goto out;
+
+ if (bsize > buf->f_bsize) {
+ /*
+ * we will reduce bsize, so we have to expand blocks
+ * etc. to match them again
+ */
+ factor = (bsize / buf->f_bsize);
+ blocks = au_mul_till_max(blocks, factor);
+ bfree = au_mul_till_max(bfree, factor);
+ bavail = au_mul_till_max(bavail, factor);
+ bsize = buf->f_bsize;
+ }
+
+ factor = (buf->f_bsize / bsize);
+ blocks = au_add_till_max(blocks,
+ au_mul_till_max(buf->f_blocks, factor));
+ bfree = au_add_till_max(bfree,
+ au_mul_till_max(buf->f_bfree, factor));
+ bavail = au_add_till_max(bavail,
+ au_mul_till_max(buf->f_bavail, factor));
+ files = au_add_till_max(files, buf->f_files);
+ ffree = au_add_till_max(ffree, buf->f_ffree);
+ }
+
+ buf->f_bsize = bsize;
+ buf->f_blocks = blocks;
+ buf->f_bfree = bfree;
+ buf->f_bavail = bavail;
+ buf->f_files = files;
+ buf->f_ffree = ffree;
+ buf->f_frsize = 0;
+
+out:
+ return err;
+}
+
+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ int err;
+ struct path h_path;
+ struct super_block *sb;
+
+ /* lock free root dinfo */
+ sb = dentry->d_sb;
+ si_noflush_read_lock(sb);
+ if (!au_opt_test(au_mntflags(sb), SUM)) {
+ /* sb->s_root for NFS is unreliable */
+ h_path.mnt = au_sbr_mnt(sb, 0);
+ h_path.dentry = h_path.mnt->mnt_root;
+ err = vfs_statfs(&h_path, buf);
+ } else
+ err = au_statfs_sum(sb, buf);
+ si_read_unlock(sb);
+
+ if (!err) {
+ buf->f_type = AUFS_SUPER_MAGIC;
+ buf->f_namelen = AUFS_MAX_NAMELEN;
+ memset(&buf->f_fsid, 0, sizeof(buf->f_fsid));
+ }
+ /* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_sync_fs(struct super_block *sb, int wait)
+{
+ int err, e;
+ aufs_bindex_t bbot, bindex;
+ struct au_branch *br;
+ struct super_block *h_sb;
+
+ err = 0;
+ si_noflush_read_lock(sb);
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!au_br_writable(br->br_perm))
+ continue;
+
+ h_sb = au_sbr_sb(sb, bindex);
+ e = vfsub_sync_filesystem(h_sb, wait);
+ if (unlikely(e && !err))
+ err = e;
+ /* go on even if an error happens */
+ }
+ si_read_unlock(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* final actions when unmounting a file system */
+static void aufs_put_super(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = au_sbi(sb);
+ if (sbinfo)
+ kobject_put(&sbinfo->si_kobj);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb,
+ struct super_block *sb, void *arg)
+{
+ void *array;
+ unsigned long long n, sz;
+
+ array = NULL;
+ n = 0;
+ if (!*hint)
+ goto out;
+
+ if (*hint > ULLONG_MAX / sizeof(array)) {
+ array = ERR_PTR(-EMFILE);
+ pr_err("hint %llu\n", *hint);
+ goto out;
+ }
+
+ sz = sizeof(array) * *hint;
+ array = kzalloc(sz, GFP_NOFS);
+ if (unlikely(!array))
+ array = vzalloc(sz);
+ if (unlikely(!array)) {
+ array = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ n = cb(sb, array, *hint, arg);
+ AuDebugOn(n > *hint);
+
+out:
+ *hint = n;
+ return array;
+}
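+
+/*
+ * Note for callers: the array may come from either kzalloc() or vzalloc(),
+ * so it must be released with kvfree() (see au_iarray_free() below).
+ */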
+
+static unsigned long long au_iarray_cb(struct super_block *sb, void *a,
+ unsigned long long max __maybe_unused,
+ void *arg)
+{
+ unsigned long long n;
+ struct inode **p, *inode;
+ struct list_head *head;
+
+ n = 0;
+ p = a;
+ head = arg;
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, head, i_sb_list) {
+ if (!au_is_bad_inode(inode)
+ && au_ii(inode)->ii_btop >= 0) {
+ spin_lock(&inode->i_lock);
+ if (atomic_read(&inode->i_count)) {
+ au_igrab(inode);
+ *p++ = inode;
+ n++;
+ AuDebugOn(n > max);
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+
+ return n;
+}
+
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max)
+{
+ struct au_sbinfo *sbi;
+
+ sbi = au_sbi(sb);
+ *max = au_lcnt_read(&sbi->si_ninodes, /*do_rev*/1);
+ return au_array_alloc(max, au_iarray_cb, sb, &sb->s_inodes);
+}
+
+void au_iarray_free(struct inode **a, unsigned long long max)
+{
+ unsigned long long ull;
+
+ for (ull = 0; ull < max; ull++)
+ iput(a[ull]);
+ kvfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * refresh dentry and inode at remount time.
+ */
+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */
+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags,
+ struct dentry *parent)
+{
+ int err;
+
+ di_write_lock_child(dentry);
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_refresh_dentry(dentry, parent);
+ if (!err && dir_flags)
+ au_hn_reset(d_inode(dentry), dir_flags);
+ di_read_unlock(parent, AuLock_IR);
+ di_write_unlock(dentry);
+
+ return err;
+}
+
+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen,
+ struct au_sbinfo *sbinfo,
+ const unsigned int dir_flags, unsigned int do_idop)
+{
+ int err;
+ struct dentry *parent;
+
+ err = 0;
+ parent = dget_parent(dentry);
+ if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) {
+ if (d_really_is_positive(dentry)) {
+ if (!d_is_dir(dentry))
+ err = au_do_refresh(dentry, /*dir_flags*/0,
+ parent);
+ else {
+ err = au_do_refresh(dentry, dir_flags, parent);
+ if (unlikely(err))
+ au_fset_si(sbinfo, FAILED_REFRESH_DIR);
+ }
+ } else
+ err = au_do_refresh(dentry, /*dir_flags*/0, parent);
+ AuDbgDentry(dentry);
+ }
+ dput(parent);
+
+ if (!err) {
+ if (do_idop)
+ au_refresh_dop(dentry, /*force_reval*/0);
+ } else
+ au_refresh_dop(dentry, /*force_reval*/1);
+
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_refresh_d(struct super_block *sb, unsigned int do_idop)
+{
+ int err, i, j, ndentry, e;
+ unsigned int sigen;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries, *d;
+ struct au_sbinfo *sbinfo;
+ struct dentry *root = sb->s_root;
+ const unsigned int dir_flags = au_hi_flags(d_inode(root), /*isdir*/1);
+
+ if (do_idop)
+ au_refresh_dop(root, /*force_reval*/0);
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, root, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ sigen = au_sigen(sb);
+ sbinfo = au_sbi(sb);
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++) {
+ d = dentries[j];
+ e = au_do_refresh_d(d, sigen, sbinfo, dir_flags,
+ do_idop);
+ if (unlikely(e && !err))
+ err = e;
+ /* go on even err */
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
+static int au_refresh_i(struct super_block *sb, unsigned int do_idop)
+{
+ int err, e;
+ unsigned int sigen;
+ unsigned long long max, ull;
+ struct inode *inode, **array;
+
+ array = au_iarray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ err = 0;
+ sigen = au_sigen(sb);
+ for (ull = 0; ull < max; ull++) {
+ inode = array[ull];
+ if (unlikely(!inode))
+ break;
+
+ e = 0;
+ ii_write_lock_child(inode);
+ if (au_iigen(inode, NULL) != sigen) {
+ e = au_refresh_hinode_self(inode);
+ if (unlikely(e)) {
+ au_refresh_iop(inode, /*force_getattr*/1);
+ pr_err("error %d, i%lu\n", e, inode->i_ino);
+ if (!err)
+ err = e;
+ /* go on even if err */
+ }
+ }
+ if (!e && do_idop)
+ au_refresh_iop(inode, /*force_getattr*/0);
+ ii_write_unlock(inode);
+ }
+
+ au_iarray_free(array, max);
+
+out:
+ return err;
+}
+
+static void au_remount_refresh(struct super_block *sb, unsigned int do_idop)
+{
+ int err, e;
+ unsigned int udba;
+ aufs_bindex_t bindex, bbot;
+ struct dentry *root;
+ struct inode *inode;
+ struct au_branch *br;
+ struct au_sbinfo *sbi;
+
+ au_sigen_inc(sb);
+ sbi = au_sbi(sb);
+ au_fclr_si(sbi, FAILED_REFRESH_DIR);
+
+ root = sb->s_root;
+ DiMustNoWaiters(root);
+ inode = d_inode(root);
+ IiMustNoWaiters(inode);
+
+ udba = au_opt_udba(sb);
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ err = au_hnotify_reset_br(udba, br, br->br_perm);
+ if (unlikely(err))
+ AuIOErr("hnotify failed on br %d, %d, ignored\n",
+ bindex, err);
+ /* go on even if err */
+ }
+ au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1));
+
+ if (do_idop) {
+ if (au_ftest_si(sbi, NO_DREVAL)) {
+ AuDebugOn(sb->s_d_op == &aufs_dop_noreval);
+ sb->s_d_op = &aufs_dop_noreval;
+ AuDebugOn(sbi->si_iop_array == aufs_iop_nogetattr);
+ sbi->si_iop_array = aufs_iop_nogetattr;
+ } else {
+ AuDebugOn(sb->s_d_op == &aufs_dop);
+ sb->s_d_op = &aufs_dop;
+ AuDebugOn(sbi->si_iop_array == aufs_iop);
+ sbi->si_iop_array = aufs_iop;
+ }
+ pr_info("reset to %ps and %ps\n",
+ sb->s_d_op, sbi->si_iop_array);
+ }
+
+ di_write_unlock(root);
+ err = au_refresh_d(sb, do_idop);
+ e = au_refresh_i(sb, do_idop);
+ if (unlikely(e && !err))
+ err = e;
+ /* aufs_write_lock() calls ..._child() */
+ di_write_lock_child(root);
+
+ au_cpup_attr_all(inode, /*force*/1);
+
+ if (unlikely(err))
+ AuIOErr("refresh failed, ignored, %d\n", err);
+}
+
+/* stop extra interpretation of errno in mount(8), and strange error messages */
+static int cvt_err(int err)
+{
+ AuTraceErr(err);
+
+ switch (err) {
+ case -ENOENT:
+ case -ENOTDIR:
+ case -EEXIST:
+ case -EIO:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+ int err, do_dx;
+ unsigned int mntflags;
+ struct au_opts opts = {
+ .opt = NULL
+ };
+ struct dentry *root;
+ struct inode *inode;
+ struct au_sbinfo *sbinfo;
+
+ err = 0;
+ root = sb->s_root;
+ if (!data || !*data) {
+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (!err) {
+ di_write_lock_child(root);
+ err = au_opts_verify(sb, *flags, /*pending*/0);
+ aufs_write_unlock(root);
+ }
+ goto out;
+ }
+
+ err = -ENOMEM;
+ opts.opt = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!opts.opt))
+ goto out;
+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+ opts.flags = AuOpts_REMOUNT;
+ opts.sb_flags = *flags;
+
+ /* parse it before aufs lock */
+ err = au_opts_parse(sb, data, &opts);
+ if (unlikely(err))
+ goto out_opts;
+
+ sbinfo = au_sbi(sb);
+ inode = d_inode(root);
+ inode_lock(inode);
+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_mtx;
+ di_write_lock_child(root);
+
+ /* au_opts_remount() may return an error */
+ err = au_opts_remount(sb, &opts);
+ au_opts_free(&opts);
+
+ if (au_ftest_opts(opts.flags, REFRESH))
+ au_remount_refresh(sb, au_ftest_opts(opts.flags, REFRESH_IDOP));
+
+ if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) {
+ mntflags = au_mntflags(sb);
+ do_dx = !!au_opt_test(mntflags, DIO);
+ au_dy_arefresh(do_dx);
+ }
+
+ au_fhsm_wrote_all(sb, /*force*/1); /* ?? */
+ aufs_write_unlock(root);
+
+out_mtx:
+ inode_unlock(inode);
+out_opts:
+ free_page((unsigned long)opts.opt);
+out:
+ err = cvt_err(err);
+ AuTraceErr(err);
+ return err;
+}
+
+static const struct super_operations aufs_sop = {
+ .alloc_inode = aufs_alloc_inode,
+ .destroy_inode = aufs_destroy_inode,
+ /* always deleting, no clearing */
+ .drop_inode = generic_delete_inode,
+ .show_options = aufs_show_options,
+ .statfs = aufs_statfs,
+ .put_super = aufs_put_super,
+ .sync_fs = aufs_sync_fs,
+ .remount_fs = aufs_remount_fs
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int alloc_root(struct super_block *sb)
+{
+ int err;
+ struct inode *inode;
+ struct dentry *root;
+
+ err = -ENOMEM;
+ inode = au_iget_locked(sb, AUFS_ROOT_INO);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out;
+
+ inode->i_op = aufs_iop + AuIop_DIR; /* with getattr by default */
+ inode->i_fop = &aufs_dir_fop;
+ inode->i_mode = S_IFDIR;
+ set_nlink(inode, 2);
+ unlock_new_inode(inode);
+
+ err = -ENOMEM; /* d_make_root() returns NULL on failure */
+ root = d_make_root(inode);
+ if (unlikely(!root))
+ goto out;
+ err = PTR_ERR(root);
+ if (IS_ERR(root))
+ goto out;
+
+ err = au_di_init(root);
+ if (!err) {
+ sb->s_root = root;
+ return 0; /* success */
+ }
+ dput(root);
+
+out:
+ return err;
+}
+
+static int aufs_fill_super(struct super_block *sb, void *raw_data,
+ int silent __maybe_unused)
+{
+ int err;
+ struct au_opts opts = {
+ .opt = NULL
+ };
+ struct au_sbinfo *sbinfo;
+ struct dentry *root;
+ struct inode *inode;
+ char *arg = raw_data;
+
+ if (unlikely(!arg || !*arg)) {
+ err = -EINVAL;
+ pr_err("no arg\n");
+ goto out;
+ }
+
+ err = -ENOMEM;
+ opts.opt = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!opts.opt))
+ goto out;
+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+ opts.sb_flags = sb->s_flags;
+
+ err = au_si_alloc(sb);
+ if (unlikely(err))
+ goto out_opts;
+ sbinfo = au_sbi(sb);
+
+ /* all timestamps always follow the ones on the branch */
+ sb->s_flags |= SB_NOATIME | SB_NODIRATIME;
+ sb->s_flags |= SB_I_VERSION; /* do we really need this? */
+ sb->s_op = &aufs_sop;
+ sb->s_d_op = &aufs_dop;
+ sb->s_magic = AUFS_SUPER_MAGIC;
+ sb->s_maxbytes = 0;
+ sb->s_stack_depth = 1;
+ au_export_init(sb);
+ au_xattr_init(sb);
+
+ err = alloc_root(sb);
+ if (unlikely(err)) {
+ si_write_unlock(sb);
+ goto out_info;
+ }
+ root = sb->s_root;
+ inode = d_inode(root);
+
+ /*
+ * actually we can parse the options here regardless of the aufs lock.
+ * but at remount time, parsing must be done before taking the aufs lock,
+ * so we follow the same rule here.
+ */
+ ii_write_lock_parent(inode);
+ aufs_write_unlock(root);
+ err = au_opts_parse(sb, arg, &opts);
+ if (unlikely(err))
+ goto out_root;
+
+ /* lock vfs_inode first, then aufs. */
+ inode_lock(inode);
+ aufs_write_lock(root);
+ err = au_opts_mount(sb, &opts);
+ au_opts_free(&opts);
+ if (!err && au_ftest_si(sbinfo, NO_DREVAL)) {
+ sb->s_d_op = &aufs_dop_noreval;
+ pr_info("%ps\n", sb->s_d_op);
+ au_refresh_dop(root, /*force_reval*/0);
+ sbinfo->si_iop_array = aufs_iop_nogetattr;
+ au_refresh_iop(inode, /*force_getattr*/0);
+ }
+ aufs_write_unlock(root);
+ inode_unlock(inode);
+ if (!err)
+ goto out_opts; /* success */
+
+out_root:
+ dput(root);
+ sb->s_root = NULL;
+out_info:
+ kobject_put(&sbinfo->si_kobj);
+ sb->s_fs_info = NULL;
+out_opts:
+ free_page((unsigned long)opts.opt);
+out:
+ AuTraceErr(err);
+ err = cvt_err(err);
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name __maybe_unused,
+ void *raw_data)
+{
+ struct dentry *root;
+
+ /* all timestamps always follow the ones on the branch */
+ /* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */
+ root = mount_nodev(fs_type, flags, raw_data, aufs_fill_super);
+ if (IS_ERR(root))
+ goto out;
+
+ au_sbilist_add(root->d_sb);
+
+out:
+ return root;
+}
+
+static void aufs_kill_sb(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = au_sbi(sb);
+ if (sbinfo) {
+ au_sbilist_del(sb);
+ aufs_write_lock(sb->s_root);
+ au_fhsm_fin(sb);
+ if (sbinfo->si_wbr_create_ops->fin)
+ sbinfo->si_wbr_create_ops->fin(sb);
+ if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) {
+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE);
+ au_remount_refresh(sb, /*do_idop*/0);
+ }
+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
+ au_plink_put(sb, /*verbose*/1);
+ au_xino_clr(sb);
+ au_dr_opt_flush(sb);
+ sbinfo->si_sb = NULL;
+ aufs_write_unlock(sb->s_root);
+ au_nwt_flush(&sbinfo->si_nowait);
+ }
+ kill_anon_super(sb);
+}
+
+struct file_system_type aufs_fs_type = {
+ .name = AUFS_FSTYPE,
+ /* a race between rename and others */
+ .fs_flags = FS_RENAME_DOES_D_MOVE,
+ .mount = aufs_mount,
+ .kill_sb = aufs_kill_sb,
+ /* no need to __module_get() and module_put(). */
+ .owner = THIS_MODULE,
+};
diff --git a/fs/aufs/super.h b/fs/aufs/super.h
new file mode 100644
index 000000000000..74571acf3eb9
--- /dev/null
+++ b/fs/aufs/super.h
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * super_block operations
+ */
+
+#ifndef __AUFS_SUPER_H__
+#define __AUFS_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include "hbl.h"
+#include "lcnt.h"
+#include "rwsem.h"
+#include "wkq.h"
+
+/* policies to select one among multiple writable branches */
+struct au_wbr_copyup_operations {
+ int (*copyup)(struct dentry *dentry);
+};
+
+#define AuWbr_DIR 1 /* target is a dir */
+#define AuWbr_PARENT (1 << 1) /* always require a parent */
+
+#define au_ftest_wbr(flags, name) ((flags) & AuWbr_##name)
+#define au_fset_wbr(flags, name)	do { (flags) |= AuWbr_##name; } while (0)
+#define au_fclr_wbr(flags, name)	do { (flags) &= ~AuWbr_##name; } while (0)
+
+struct au_wbr_create_operations {
+ int (*create)(struct dentry *dentry, unsigned int flags);
+ int (*init)(struct super_block *sb);
+ int (*fin)(struct super_block *sb);
+};
+
+struct au_wbr_mfs {
+ struct mutex mfs_lock; /* protect this structure */
+ unsigned long mfs_jiffy;
+ unsigned long mfs_expire;
+ aufs_bindex_t mfs_bindex;
+
+ unsigned long long mfsrr_bytes;
+ unsigned long long mfsrr_watermark;
+};
+
+#define AuPlink_NHASH 100
+static inline int au_plink_hash(ino_t ino)
+{
+ return ino % AuPlink_NHASH;
+}
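+
+/*
+ * illustration: pseudo-link entries are spread over AuPlink_NHASH buckets
+ * purely by inode number, e.g. au_plink_hash(4660) == 4660 % 100 == 60,
+ * selecting index 60 of the si_plink[] array defined below.
+ */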
+
+/* File-based Hierarchical Storage Management */
+struct au_fhsm {
+#ifdef CONFIG_AUFS_FHSM
+ /* allow only one process who can receive the notification */
+ spinlock_t fhsm_spin;
+ pid_t fhsm_pid;
+ wait_queue_head_t fhsm_wqh;
+ atomic_t fhsm_readable;
+
+ /* these are protected by si_rwsem */
+ unsigned long fhsm_expire;
+ aufs_bindex_t fhsm_bottom;
+#endif
+};
+
+struct au_branch;
+struct au_sbinfo {
+ /* nowait tasks in the system-wide workqueue */
+ struct au_nowait_tasks si_nowait;
+
+ /*
+	 * we tried using sb->s_umount, but it failed due to a lock dependency
+	 * with i_mutex. a dedicated rwsem for au_sbinfo is necessary.
+ */
+ struct au_rwsem si_rwsem;
+
+ /*
+	 * dirty approach to protect sb->s_inodes and ->s_files (now gone)
+	 * from remount.
+ */
+ au_lcnt_t si_ninodes, si_nfiles;
+
+ /* branch management */
+ unsigned int si_generation;
+
+ /* see AuSi_ flags */
+ unsigned char au_si_status;
+
+ aufs_bindex_t si_bbot;
+
+	/* dirty trick to keep br_id positive */
+ unsigned int si_last_br_id :
+ sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1;
+ struct au_branch **si_branch;
+
+ /* policy to select a writable branch */
+ unsigned char si_wbr_copyup;
+ unsigned char si_wbr_create;
+ struct au_wbr_copyup_operations *si_wbr_copyup_ops;
+ struct au_wbr_create_operations *si_wbr_create_ops;
+
+ /* round robin */
+ atomic_t si_wbr_rr_next;
+
+ /* most free space */
+ struct au_wbr_mfs si_wbr_mfs;
+
+ /* File-based Hierarchical Storage Management */
+ struct au_fhsm si_fhsm;
+
+ /* mount flags */
+ /* include/asm-ia64/siginfo.h defines a macro named si_flags */
+ unsigned int si_mntflags;
+
+ /* external inode number (bitmap and translation table) */
+ vfs_readf_t si_xread;
+ vfs_writef_t si_xwrite;
+ loff_t si_ximaxent; /* max entries in a xino */
+
+ struct file *si_xib;
+ struct mutex si_xib_mtx; /* protect xib members */
+ unsigned long *si_xib_buf;
+ unsigned long si_xib_last_pindex;
+ int si_xib_next_bit;
+
+ unsigned long si_xino_jiffy;
+ unsigned long si_xino_expire;
+ /* reserved for future use */
+ /* unsigned long long si_xib_limit; */ /* Max xib file size */
+
+#ifdef CONFIG_AUFS_EXPORT
+ /* i_generation */
+ /* todo: make xigen file an array to support many inode numbers */
+ struct file *si_xigen;
+ atomic_t si_xigen_next;
+#endif
+
+ /* dirty trick to support atomic_open */
+ struct hlist_bl_head si_aopen;
+
+ /* vdir parameters */
+ unsigned long si_rdcache; /* max cache time in jiffies */
+ unsigned int si_rdblk; /* deblk size */
+ unsigned int si_rdhash; /* hash size */
+
+ /*
+	 * If the number of whiteouts is larger than si_dirwh, leave all of
+	 * them after au_whtmp_ren to reduce the cost of rmdir(2).
+	 * a future fsck.aufs or a kernel thread will remove them later.
+	 * Otherwise, remove all whiteouts and the dir itself in rmdir(2).
+ */
+ unsigned int si_dirwh;
+
+ /* pseudo_link list */
+ struct hlist_bl_head si_plink[AuPlink_NHASH];
+ wait_queue_head_t si_plink_wq;
+ spinlock_t si_plink_maint_lock;
+ pid_t si_plink_maint_pid;
+
+ /* file list */
+ struct hlist_bl_head si_files;
+
+ /* with/without getattr, brother of sb->s_d_op */
+ struct inode_operations *si_iop_array;
+
+ /*
+ * sysfs and lifetime management.
+	 * this is not a small structure and may waste memory when sysfs is
+	 * disabled, particularly when many aufs filesystems are mounted.
+	 * but the majority of users have sysfs enabled.
+ */
+ struct kobject si_kobj;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *si_dbgaufs;
+ struct dentry *si_dbgaufs_plink;
+ struct dentry *si_dbgaufs_xib;
+#ifdef CONFIG_AUFS_EXPORT
+ struct dentry *si_dbgaufs_xigen;
+#endif
+#endif
+
+#ifdef CONFIG_AUFS_SBILIST
+ struct hlist_bl_node si_list;
+#endif
+
+ /* dirty, necessary for unmounting, sysfs and sysrq */
+ struct super_block *si_sb;
+};
+
+/* sbinfo status flags */
+/*
+ * set when refresh_dirs() failed at remount time;
+ * refreshing dirs is then retried at access time.
+ * while it is clear, refreshing dirs at access time is unnecessary.
+ */
+#define AuSi_FAILED_REFRESH_DIR 1
+#define AuSi_FHSM (1 << 1) /* fhsm is active now */
+#define AuSi_NO_DREVAL (1 << 2) /* disable all d_revalidate */
+
+#ifndef CONFIG_AUFS_FHSM
+#undef AuSi_FHSM
+#define AuSi_FHSM 0
+#endif
+
+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi,
+ unsigned int flag)
+{
+ AuRwMustAnyLock(&sbi->si_rwsem);
+ return sbi->au_si_status & flag;
+}
+#define au_ftest_si(sbinfo, name) au_do_ftest_si(sbinfo, AuSi_##name)
+#define au_fset_si(sbinfo, name) do { \
+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+ (sbinfo)->au_si_status |= AuSi_##name; \
+} while (0)
+#define au_fclr_si(sbinfo, name) do { \
+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+ (sbinfo)->au_si_status &= ~AuSi_##name; \
+} while (0)
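+
+/*
+ * usage sketch: with si_rwsem write-locked,
+ *	au_fset_si(sbinfo, NO_DREVAL);
+ * sets AuSi_NO_DREVAL in sbinfo->au_si_status, and
+ *	if (au_ftest_si(sbinfo, NO_DREVAL))
+ * tests it while holding the lock in either mode.
+ */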
+
+/* ---------------------------------------------------------------------- */
+
+/* policy to select one among writable branches */
+#define AuWbrCopyup(sbinfo, ...) \
+ ((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__))
+#define AuWbrCreate(sbinfo, ...) \
+ ((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__))
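+
+/*
+ * e.g. a create path would typically dispatch via
+ *	err = AuWbrCreate(sbinfo, dentry, open_flags);
+ * which expands to sbinfo->si_wbr_create_ops->create(dentry, open_flags),
+ * i.e. the branch-selection policy chosen at mount time.
+ */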
+
+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */
+#define AuLock_DW 1 /* write-lock dentry */
+#define AuLock_IR (1 << 1) /* read-lock inode */
+#define AuLock_IW (1 << 2) /* write-lock inode */
+#define AuLock_FLUSH (1 << 3) /* wait for 'nowait' tasks */
+#define AuLock_DIRS (1 << 4) /* target is a pair of dirs */
+ /* except RENAME_EXCHANGE */
+#define AuLock_NOPLM (1 << 5) /* return err in plm mode */
+#define AuLock_NOPLMW (1 << 6) /* wait for plm mode ends */
+#define AuLock_GEN (1 << 7) /* test digen/iigen */
+#define au_ftest_lock(flags, name) ((flags) & AuLock_##name)
+#define au_fset_lock(flags, name) \
+ do { (flags) |= AuLock_##name; } while (0)
+#define au_fclr_lock(flags, name) \
+ do { (flags) &= ~AuLock_##name; } while (0)
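+
+/*
+ * illustrative combination: an entry point needing the dentry write-locked
+ * and all pending 'nowait' tasks flushed first would pass
+ *	aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH);
+ */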
+
+/* ---------------------------------------------------------------------- */
+
+/* super.c */
+extern struct file_system_type aufs_fs_type;
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino);
+typedef unsigned long long (*au_arraycb_t)(struct super_block *sb, void *array,
+ unsigned long long max, void *arg);
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb,
+ struct super_block *sb, void *arg);
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max);
+void au_iarray_free(struct inode **a, unsigned long long max);
+
+/* sbinfo.c */
+void au_si_free(struct kobject *kobj);
+int au_si_alloc(struct super_block *sb);
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr, int may_shrink);
+
+unsigned int au_sigen_inc(struct super_block *sb);
+aufs_bindex_t au_new_br_id(struct super_block *sb);
+
+int si_read_lock(struct super_block *sb, int flags);
+int si_write_lock(struct super_block *sb, int flags);
+int aufs_read_lock(struct dentry *dentry, int flags);
+void aufs_read_unlock(struct dentry *dentry, int flags);
+void aufs_write_lock(struct dentry *dentry);
+void aufs_write_unlock(struct dentry *dentry);
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags);
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+/* wbr_policy.c */
+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[];
+extern struct au_wbr_create_operations au_wbr_create_ops[];
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex);
+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t btop);
+
+/* mvdown.c */
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *arg);
+
+#ifdef CONFIG_AUFS_FHSM
+/* fhsm.c */
+
+static inline pid_t au_fhsm_pid(struct au_fhsm *fhsm)
+{
+ pid_t pid;
+
+ spin_lock(&fhsm->fhsm_spin);
+ pid = fhsm->fhsm_pid;
+ spin_unlock(&fhsm->fhsm_spin);
+
+ return pid;
+}
+
+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force);
+void au_fhsm_wrote_all(struct super_block *sb, int force);
+int au_fhsm_fd(struct super_block *sb, int oflags);
+int au_fhsm_br_alloc(struct au_branch *br);
+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex);
+void au_fhsm_fin(struct super_block *sb);
+void au_fhsm_init(struct au_sbinfo *sbinfo);
+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec);
+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo);
+#else
+AuStubVoid(au_fhsm_wrote, struct super_block *sb, aufs_bindex_t bindex,
+ int force)
+AuStubVoid(au_fhsm_wrote_all, struct super_block *sb, int force)
+AuStub(int, au_fhsm_fd, return -EOPNOTSUPP, struct super_block *sb, int oflags)
+AuStub(pid_t, au_fhsm_pid, return 0, struct au_fhsm *fhsm)
+AuStubInt0(au_fhsm_br_alloc, struct au_branch *br)
+AuStubVoid(au_fhsm_set_bottom, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(au_fhsm_fin, struct super_block *sb)
+AuStubVoid(au_fhsm_init, struct au_sbinfo *sbinfo)
+AuStubVoid(au_fhsm_set, struct au_sbinfo *sbinfo, unsigned int sec)
+AuStubVoid(au_fhsm_show, struct seq_file *seq, struct au_sbinfo *sbinfo)
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_sbinfo *au_sbi(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+int au_test_nfsd(void);
+void au_export_init(struct super_block *sb);
+void au_xigen_inc(struct inode *inode);
+int au_xigen_new(struct inode *inode);
+int au_xigen_set(struct super_block *sb, struct path *path);
+void au_xigen_clr(struct super_block *sb);
+
+static inline int au_busy_or_stale(void)
+{
+ if (!au_test_nfsd())
+ return -EBUSY;
+ return -ESTALE;
+}
+#else
+AuStubInt0(au_test_nfsd, void)
+AuStubVoid(au_export_init, struct super_block *sb)
+AuStubVoid(au_xigen_inc, struct inode *inode)
+AuStubInt0(au_xigen_new, struct inode *inode)
+AuStubInt0(au_xigen_set, struct super_block *sb, struct path *path)
+AuStubVoid(au_xigen_clr, struct super_block *sb)
+AuStub(int, au_busy_or_stale, return -EBUSY, void)
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_SBILIST
+/* module.c */
+extern struct hlist_bl_head au_sbilist;
+
+static inline void au_sbilist_init(void)
+{
+ INIT_HLIST_BL_HEAD(&au_sbilist);
+}
+
+static inline void au_sbilist_add(struct super_block *sb)
+{
+ au_hbl_add(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+static inline void au_sbilist_del(struct super_block *sb)
+{
+ au_hbl_del(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+static inline void au_sbilist_lock(void)
+{
+ hlist_bl_lock(&au_sbilist);
+}
+
+static inline void au_sbilist_unlock(void)
+{
+ hlist_bl_unlock(&au_sbilist);
+}
+#define AuGFP_SBILIST GFP_ATOMIC
+#else
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST GFP_NOFS
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+#else
+AuStubVoid(au_sbilist_init, void)
+AuStubVoid(au_sbilist_add, struct super_block *sb)
+AuStubVoid(au_sbilist_del, struct super_block *sb)
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST GFP_NOFS
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo)
+{
+ /*
+	 * This function is effectively a dynamic '__init' function,
+	 * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+#ifdef CONFIG_DEBUG_FS
+ sbinfo->si_dbgaufs = NULL;
+ sbinfo->si_dbgaufs_plink = NULL;
+ sbinfo->si_dbgaufs_xib = NULL;
+#ifdef CONFIG_AUFS_EXPORT
+ sbinfo->si_dbgaufs_xigen = NULL;
+#endif
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* current->atomic_flags */
+/* this value must never collide with the ones defined in linux/sched.h */
+#define PFA_AUFS 7
+
+TASK_PFA_TEST(AUFS, test_aufs) /* task_test_aufs */
+TASK_PFA_SET(AUFS, aufs) /* task_set_aufs */
+TASK_PFA_CLEAR(AUFS, aufs) /* task_clear_aufs */
+
+static inline int si_pid_test(struct super_block *sb)
+{
+ return !!task_test_aufs(current);
+}
+
+static inline void si_pid_clr(struct super_block *sb)
+{
+ AuDebugOn(!task_test_aufs(current));
+ task_clear_aufs(current);
+}
+
+static inline void si_pid_set(struct super_block *sb)
+{
+ AuDebugOn(task_test_aufs(current));
+ task_set_aufs(current);
+}
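+
+/*
+ * roughly, PFA_AUFS marks "current already holds si_rwsem of an aufs sb":
+ * si_noflush_read_lock() below calls si_pid_set() right after acquiring the
+ * lock and si_read_unlock() calls si_pid_clr() before releasing it, so
+ * re-entered aufs code can tell via si_pid_test() that it already owns it.
+ */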
+
+/* ---------------------------------------------------------------------- */
+
+/* lock superblock. mainly for entry point functions */
+#define __si_read_lock(sb) au_rw_read_lock(&au_sbi(sb)->si_rwsem)
+#define __si_write_lock(sb) au_rw_write_lock(&au_sbi(sb)->si_rwsem)
+#define __si_read_trylock(sb) au_rw_read_trylock(&au_sbi(sb)->si_rwsem)
+#define __si_write_trylock(sb) au_rw_write_trylock(&au_sbi(sb)->si_rwsem)
+/*
+#define __si_read_trylock_nested(sb) \
+ au_rw_read_trylock_nested(&au_sbi(sb)->si_rwsem)
+#define __si_write_trylock_nested(sb) \
+ au_rw_write_trylock_nested(&au_sbi(sb)->si_rwsem)
+*/
+
+#define __si_read_unlock(sb) au_rw_read_unlock(&au_sbi(sb)->si_rwsem)
+#define __si_write_unlock(sb) au_rw_write_unlock(&au_sbi(sb)->si_rwsem)
+#define __si_downgrade_lock(sb) au_rw_dgrade_lock(&au_sbi(sb)->si_rwsem)
+
+#define SiMustNoWaiters(sb) AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem)
+#define SiMustAnyLock(sb) AuRwMustAnyLock(&au_sbi(sb)->si_rwsem)
+#define SiMustWriteLock(sb) AuRwMustWriteLock(&au_sbi(sb)->si_rwsem)
+
+static inline void si_noflush_read_lock(struct super_block *sb)
+{
+ __si_read_lock(sb);
+ si_pid_set(sb);
+}
+
+static inline int si_noflush_read_trylock(struct super_block *sb)
+{
+ int locked;
+
+ locked = __si_read_trylock(sb);
+ if (locked)
+ si_pid_set(sb);
+ return locked;
+}
+
+static inline void si_noflush_write_lock(struct super_block *sb)
+{
+ __si_write_lock(sb);
+ si_pid_set(sb);
+}
+
+static inline int si_noflush_write_trylock(struct super_block *sb)
+{
+ int locked;
+
+ locked = __si_write_trylock(sb);
+ if (locked)
+ si_pid_set(sb);
+ return locked;
+}
+
+#if 0 /* reserved */
+static inline int si_read_trylock(struct super_block *sb, int flags)
+{
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+ return si_noflush_read_trylock(sb);
+}
+#endif
+
+static inline void si_read_unlock(struct super_block *sb)
+{
+ si_pid_clr(sb);
+ __si_read_unlock(sb);
+}
+
+#if 0 /* reserved */
+static inline int si_write_trylock(struct super_block *sb, int flags)
+{
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+ return si_noflush_write_trylock(sb);
+}
+#endif
+
+static inline void si_write_unlock(struct super_block *sb)
+{
+ si_pid_clr(sb);
+ __si_write_unlock(sb);
+}
+
+#if 0 /* reserved */
+static inline void si_downgrade_lock(struct super_block *sb)
+{
+ __si_downgrade_lock(sb);
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline aufs_bindex_t au_sbbot(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_bbot;
+}
+
+static inline unsigned int au_mntflags(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_mntflags;
+}
+
+static inline unsigned int au_sigen(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_generation;
+}
+
+static inline struct au_branch *au_sbr(struct super_block *sb,
+ aufs_bindex_t bindex)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_branch[0 + bindex];
+}
+
+static inline loff_t au_xi_maxent(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_ximaxent;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SUPER_H__ */
diff --git a/fs/aufs/sysaufs.c b/fs/aufs/sysaufs.c
new file mode 100644
index 000000000000..32a0811c54de
--- /dev/null
+++ b/fs/aufs/sysaufs.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sysfs interface and lifetime management
+ * they are necessary regardless of whether sysfs is disabled.
+ */
+
+#include <linux/random.h>
+#include "aufs.h"
+
+unsigned long sysaufs_si_mask;
+struct kset *sysaufs_kset;
+
+#define AuSiAttr(_name) { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = sysaufs_si_##_name, \
+}
+
+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path);
+struct attribute *sysaufs_si_attrs[] = {
+ &sysaufs_si_attr_xi_path.attr,
+ NULL,
+};
+
+static const struct sysfs_ops au_sbi_ops = {
+ .show = sysaufs_si_show
+};
+
+static struct kobj_type au_sbi_ktype = {
+ .release = au_si_free,
+ .sysfs_ops = &au_sbi_ops,
+ .default_attrs = sysaufs_si_attrs
+};
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_init(struct au_sbinfo *sbinfo)
+{
+ int err;
+
+ sbinfo->si_kobj.kset = sysaufs_kset;
+ /* cf. sysaufs_name() */
+ err = kobject_init_and_add
+ (&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL,
+ SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo));
+
+ return err;
+}
+
+void sysaufs_fin(void)
+{
+ sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+ kset_unregister(sysaufs_kset);
+}
+
+int __init sysaufs_init(void)
+{
+ int err;
+
+ do {
+ get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask));
+ } while (!sysaufs_si_mask);
+
+ err = -EINVAL;
+ sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj);
+ if (unlikely(!sysaufs_kset))
+ goto out;
+ err = PTR_ERR(sysaufs_kset);
+ if (IS_ERR(sysaufs_kset))
+ goto out;
+ err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+ if (unlikely(err))
+ kset_unregister(sysaufs_kset);
+
+out:
+ return err;
+}
diff --git a/fs/aufs/sysaufs.h b/fs/aufs/sysaufs.h
new file mode 100644
index 000000000000..195fe279b820
--- /dev/null
+++ b/fs/aufs/sysaufs.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sysfs interface and mount lifetime management
+ */
+
+#ifndef __SYSAUFS_H__
+#define __SYSAUFS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/sysfs.h>
+#include "module.h"
+
+struct super_block;
+struct au_sbinfo;
+
+struct sysaufs_si_attr {
+ struct attribute attr;
+ int (*show)(struct seq_file *seq, struct super_block *sb);
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* sysaufs.c */
+extern unsigned long sysaufs_si_mask;
+extern struct kset *sysaufs_kset;
+extern struct attribute *sysaufs_si_attrs[];
+int sysaufs_si_init(struct au_sbinfo *sbinfo);
+int __init sysaufs_init(void);
+void sysaufs_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+/* some people don't like exposing a kernel pointer */
+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo)
+{
+ return sysaufs_si_mask ^ (unsigned long)sbinfo;
+}
+
+#define SysaufsSiNamePrefix "si_"
+#define SysaufsSiNameLen (sizeof(SysaufsSiNamePrefix) + 16)
+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name)
+{
+ snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx",
+ sysaufs_si_id(sbinfo));
+}
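+
+/*
+ * e.g. with sysaufs_si_mask == 0xf0f0 and an sbinfo at 0x1234 (illustrative
+ * values only), the generated name is "si_e2c4", which shows up as the
+ * per-mount directory under /sys/fs/aufs/.
+ */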
+
+struct au_branch;
+#ifdef CONFIG_SYSFS
+/* sysfs.c */
+extern struct attribute_group *sysaufs_attr_group;
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb);
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+long au_brinfo_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+
+void sysaufs_br_init(struct au_branch *br);
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+
+#define sysaufs_brs_init() do {} while (0)
+
+#else
+#define sysaufs_attr_group NULL
+
+AuStubInt0(sysaufs_si_xi_path, struct seq_file *seq, struct super_block *sb)
+AuStub(ssize_t, sysaufs_si_show, return 0, struct kobject *kobj,
+ struct attribute *attr, char *buf)
+AuStubVoid(sysaufs_br_init, struct au_branch *br)
+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+
+static inline void sysaufs_brs_init(void)
+{
+ sysaufs_brs = 0;
+}
+
+#endif /* CONFIG_SYSFS */
+
+#endif /* __KERNEL__ */
+#endif /* __SYSAUFS_H__ */
diff --git a/fs/aufs/sysfs.c b/fs/aufs/sysfs.c
new file mode 100644
index 000000000000..e939d0208a99
--- /dev/null
+++ b/fs/aufs/sysfs.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sysfs interface
+ */
+
+#include <linux/compat.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+#ifdef CONFIG_AUFS_FS_MODULE
+/* this entry violates the "one line per file" policy of sysfs */
+static ssize_t config_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ ssize_t err;
+ static char *conf =
+/* this file is generated at compiling */
+#include "conf.str"
+ ;
+
+ err = snprintf(buf, PAGE_SIZE, conf);
+ if (unlikely(err >= PAGE_SIZE))
+ err = -EFBIG;
+ return err;
+}
+
+static struct kobj_attribute au_config_attr = __ATTR_RO(config);
+#endif
+
+static struct attribute *au_attr[] = {
+#ifdef CONFIG_AUFS_FS_MODULE
+ &au_config_attr.attr,
+#endif
+ NULL, /* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group sysaufs_attr_group_body = {
+ .attrs = au_attr
+};
+
+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body;
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
+{
+ int err;
+
+ SiMustAnyLock(sb);
+
+ err = 0;
+ if (au_opt_test(au_mntflags(sb), XINO)) {
+ err = au_xino_path(seq, au_sbi(sb)->si_xib);
+ seq_putc(seq, '\n');
+ }
+ return err;
+}
+
+/*
+ * the lifetime of a branch is independent of its entry under sysfs.
+ * sysfs handles the lifetime of the entry, and never calls ->show() after it
+ * is unlinked.
+ */
+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb,
+ aufs_bindex_t bindex, int idx)
+{
+ int err;
+ struct path path;
+ struct dentry *root;
+ struct au_branch *br;
+ au_br_perm_str_t perm;
+
+ AuDbg("b%d\n", bindex);
+
+ err = 0;
+ root = sb->s_root;
+ di_read_lock_parent(root, !AuLock_IR);
+ br = au_sbr(sb, bindex);
+
+ switch (idx) {
+ case AuBrSysfs_BR:
+ path.mnt = au_br_mnt(br);
+ path.dentry = au_h_dptr(root, bindex);
+ err = au_seq_path(seq, &path);
+ if (!err) {
+ au_optstr_br_perm(&perm, br->br_perm);
+ seq_printf(seq, "=%s\n", perm.a);
+ }
+ break;
+ case AuBrSysfs_BRID:
+ seq_printf(seq, "%d\n", br->br_id);
+ break;
+ }
+ di_read_unlock(root, !AuLock_IR);
+ if (unlikely(err || seq_has_overflowed(seq)))
+ err = -E2BIG;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct seq_file *au_seq(char *p, ssize_t len)
+{
+ struct seq_file *seq;
+
+ seq = kzalloc(sizeof(*seq), GFP_NOFS);
+ if (seq) {
+ /* mutex_init(&seq.lock); */
+ seq->buf = p;
+ seq->size = len;
+ return seq; /* success */
+ }
+
+ seq = ERR_PTR(-ENOMEM);
+ return seq;
+}
+
+#define SysaufsBr_PREFIX "br"
+#define SysaufsBrid_PREFIX "brid"
+
+/* todo: file size may exceed PAGE_SIZE */
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ ssize_t err;
+ int idx;
+ long l;
+ aufs_bindex_t bbot;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+ struct seq_file *seq;
+ char *name;
+ struct attribute **cattr;
+
+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+ sb = sbinfo->si_sb;
+
+ /*
+ * prevent a race condition between sysfs and aufs.
+ * for instance, sysfs_file_read() calls sysfs_get_active_two() which
+ * prohibits maintaining the sysfs entries.
+	 * here we acquire the read lock after sysfs_get_active_two().
+	 * on the other hand, the remount process may maintain the sysfs/aufs
+	 * entries after acquiring the write lock.
+	 * that combination can cause a deadlock.
+	 * so we simply give up and return -EBUSY when the trylock fails.
+ */
+ err = -EBUSY;
+ if (unlikely(!si_noflush_read_trylock(sb)))
+ goto out;
+
+ seq = au_seq(buf, PAGE_SIZE);
+ err = PTR_ERR(seq);
+ if (IS_ERR(seq))
+ goto out_unlock;
+
+ name = (void *)attr->name;
+ cattr = sysaufs_si_attrs;
+ while (*cattr) {
+ if (!strcmp(name, (*cattr)->name)) {
+ err = container_of(*cattr, struct sysaufs_si_attr, attr)
+ ->show(seq, sb);
+ goto out_seq;
+ }
+ cattr++;
+ }
+
+ if (!strncmp(name, SysaufsBrid_PREFIX,
+ sizeof(SysaufsBrid_PREFIX) - 1)) {
+ idx = AuBrSysfs_BRID;
+ name += sizeof(SysaufsBrid_PREFIX) - 1;
+ } else if (!strncmp(name, SysaufsBr_PREFIX,
+ sizeof(SysaufsBr_PREFIX) - 1)) {
+ idx = AuBrSysfs_BR;
+ name += sizeof(SysaufsBr_PREFIX) - 1;
+ } else
+ BUG();
+
+ err = kstrtol(name, 10, &l);
+ if (!err) {
+ bbot = au_sbbot(sb);
+ if (l <= bbot)
+ err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l, idx);
+ else
+ err = -ENOENT;
+ }
+
+out_seq:
+ if (!err) {
+ err = seq->count;
+ /* sysfs limit */
+ if (unlikely(err == PAGE_SIZE))
+ err = -EFBIG;
+ }
+ au_kfree_rcu(seq);
+out_unlock:
+ si_read_unlock(sb);
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_brinfo(struct super_block *sb, union aufs_brinfo __user *arg)
+{
+ int err;
+ int16_t brid;
+ aufs_bindex_t bindex, bbot;
+ size_t sz;
+ char *buf;
+ struct seq_file *seq;
+ struct au_branch *br;
+
+ si_read_lock(sb, AuLock_FLUSH);
+ bbot = au_sbbot(sb);
+ err = bbot + 1;
+ if (!arg)
+ goto out;
+
+ err = -ENOMEM;
+ buf = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!buf))
+ goto out;
+
+ seq = au_seq(buf, PAGE_SIZE);
+ err = PTR_ERR(seq);
+ if (IS_ERR(seq))
+ goto out_buf;
+
+ sz = sizeof(*arg) - offsetof(union aufs_brinfo, path);
+ for (bindex = 0; bindex <= bbot; bindex++, arg++) {
+ err = !access_ok(arg, sizeof(*arg));
+ if (unlikely(err))
+ break;
+
+ br = au_sbr(sb, bindex);
+ brid = br->br_id;
+ BUILD_BUG_ON(sizeof(brid) != sizeof(arg->id));
+ err = __put_user(brid, &arg->id);
+ if (unlikely(err))
+ break;
+
+ BUILD_BUG_ON(sizeof(br->br_perm) != sizeof(arg->perm));
+ err = __put_user(br->br_perm, &arg->perm);
+ if (unlikely(err))
+ break;
+
+ err = au_seq_path(seq, &br->br_path);
+ if (unlikely(err))
+ break;
+ seq_putc(seq, '\0');
+ if (!seq_has_overflowed(seq)) {
+ err = copy_to_user(arg->path, seq->buf, seq->count);
+ seq->count = 0;
+ if (unlikely(err))
+ break;
+ } else {
+ err = -E2BIG;
+ goto out_seq;
+ }
+ }
+ if (unlikely(err))
+ err = -EFAULT;
+
+out_seq:
+ au_kfree_rcu(seq);
+out_buf:
+ free_page((unsigned long)buf);
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+long au_brinfo_ioctl(struct file *file, unsigned long arg)
+{
+ return au_brinfo(file->f_path.dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg)
+{
+ return au_brinfo(file->f_path.dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+void sysaufs_br_init(struct au_branch *br)
+{
+ int i;
+ struct au_brsysfs *br_sysfs;
+ struct attribute *attr;
+
+ br_sysfs = br->br_sysfs;
+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+ attr = &br_sysfs->attr;
+ sysfs_attr_init(attr);
+ attr->name = br_sysfs->name;
+ attr->mode = 0444;
+ br_sysfs++;
+ }
+}
+
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+ struct au_branch *br;
+ struct kobject *kobj;
+ struct au_brsysfs *br_sysfs;
+ int i;
+ aufs_bindex_t bbot;
+
+ if (!sysaufs_brs)
+ return;
+
+ kobj = &au_sbi(sb)->si_kobj;
+ bbot = au_sbbot(sb);
+ for (; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ br_sysfs = br->br_sysfs;
+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+ sysfs_remove_file(kobj, &br_sysfs->attr);
+ br_sysfs++;
+ }
+ }
+}
+
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+ int err, i;
+ aufs_bindex_t bbot;
+ struct kobject *kobj;
+ struct au_branch *br;
+ struct au_brsysfs *br_sysfs;
+
+ if (!sysaufs_brs)
+ return;
+
+ kobj = &au_sbi(sb)->si_kobj;
+ bbot = au_sbbot(sb);
+ for (; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ br_sysfs = br->br_sysfs;
+ snprintf(br_sysfs[AuBrSysfs_BR].name, sizeof(br_sysfs->name),
+ SysaufsBr_PREFIX "%d", bindex);
+ snprintf(br_sysfs[AuBrSysfs_BRID].name, sizeof(br_sysfs->name),
+ SysaufsBrid_PREFIX "%d", bindex);
+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+ err = sysfs_create_file(kobj, &br_sysfs->attr);
+ if (unlikely(err))
+ pr_warn("failed %s under sysfs(%d)\n",
+ br_sysfs->name, err);
+ br_sysfs++;
+ }
+ }
+}
diff --git a/fs/aufs/sysrq.c b/fs/aufs/sysrq.c
new file mode 100644
index 000000000000..db10ec528f5d
--- /dev/null
+++ b/fs/aufs/sysrq.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * magic sysrq handler
+ */
+
+/* #include <linux/sysrq.h> */
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static void sysrq_sb(struct super_block *sb)
+{
+ char *plevel;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+ struct hlist_bl_head *files;
+ struct hlist_bl_node *pos;
+ struct au_finfo *finfo;
+
+ plevel = au_plevel;
+ au_plevel = KERN_WARNING;
+
+ /* since we define pr_fmt, call printk directly */
+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str)
+
+ sbinfo = au_sbi(sb);
+ printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo));
+ pr("superblock\n");
+ au_dpri_sb(sb);
+
+#if 0
+ pr("root dentry\n");
+ au_dpri_dentry(sb->s_root);
+ pr("root inode\n");
+ au_dpri_inode(d_inode(sb->s_root));
+#endif
+
+#if 0
+ do {
+ int err, i, j, ndentry;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+
+ err = au_dpages_init(&dpages, GFP_ATOMIC);
+ if (unlikely(err))
+ break;
+ err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL);
+ if (!err)
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++)
+ au_dpri_dentry(dpage->dentries[j]);
+ }
+ au_dpages_free(&dpages);
+ } while (0);
+#endif
+
+#if 1
+ {
+ struct inode *i;
+
+ pr("isolated inode\n");
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(i, &sb->s_inodes, i_sb_list) {
+ spin_lock(&i->i_lock);
+ if (1 || hlist_empty(&i->i_dentry))
+ au_dpri_inode(i);
+ spin_unlock(&i->i_lock);
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+ }
+#endif
+ pr("files\n");
+ files = &au_sbi(sb)->si_files;
+ hlist_bl_lock(files);
+ hlist_bl_for_each_entry(finfo, pos, files, fi_hlist) {
+ umode_t mode;
+
+ file = finfo->fi_file;
+ mode = file_inode(file)->i_mode;
+ if (!special_file(mode))
+ au_dpri_file(file);
+ }
+ hlist_bl_unlock(files);
+ pr("done\n");
+
+#undef pr
+ au_plevel = plevel;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* module parameter */
+static char *aufs_sysrq_key = "a";
+module_param_named(sysrq, aufs_sysrq_key, charp, 0444);
+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME);
+
+static void au_sysrq(int key __maybe_unused)
+{
+ struct au_sbinfo *sbinfo;
+ struct hlist_bl_node *pos;
+
+ lockdep_off();
+ au_sbilist_lock();
+ hlist_bl_for_each_entry(sbinfo, pos, &au_sbilist, si_list)
+ sysrq_sb(sbinfo->si_sb);
+ au_sbilist_unlock();
+ lockdep_on();
+}
+
+static struct sysrq_key_op au_sysrq_op = {
+ .handler = au_sysrq,
+ .help_msg = "Aufs",
+ .action_msg = "Aufs",
+ .enable_mask = SYSRQ_ENABLE_DUMP
+};
+
+/* ---------------------------------------------------------------------- */
+
+int __init au_sysrq_init(void)
+{
+ int err;
+ char key;
+
+ err = -1;
+ key = *aufs_sysrq_key;
+ if ('a' <= key && key <= 'z')
+ err = register_sysrq_key(key, &au_sysrq_op);
+ if (unlikely(err))
+ pr_err("err %d, sysrq=%c\n", err, key);
+ return err;
+}
+
+void au_sysrq_fin(void)
+{
+ int err;
+
+ err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op);
+ if (unlikely(err))
+ pr_err("err %d (ignored)\n", err);
+}
diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c
new file mode 100644
index 000000000000..e0a6e7e20d3a
--- /dev/null
+++ b/fs/aufs/vdir.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * virtual or vertical directory
+ */
+
+#include "aufs.h"
+
+static unsigned int calc_size(int nlen)
+{
+ return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t));
+}
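+
+/*
+ * e.g. assuming sizeof(struct au_vdir_de) is 24 and sizeof(ino_t) is 8 on a
+ * 64bit box, a 5-byte name needs ALIGN(24 + 5, 8) == 32 bytes in a deblk.
+ */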
+
+static int set_deblk_end(union au_vdir_deblk_p *p,
+ union au_vdir_deblk_p *deblk_end)
+{
+ if (calc_size(0) <= deblk_end->deblk - p->deblk) {
+ p->de->de_str.len = 0;
+ /* smp_mb(); */
+ return 0;
+ }
+ return -1; /* error */
+}
+
+/* returns true or false */
+static int is_deblk_end(union au_vdir_deblk_p *p,
+ union au_vdir_deblk_p *deblk_end)
+{
+ if (calc_size(0) <= deblk_end->deblk - p->deblk)
+ return !p->de->de_str.len;
+ return 1;
+}
+
+static unsigned char *last_deblk(struct au_vdir *vdir)
+{
+ return vdir->vd_deblk[vdir->vd_nblk - 1];
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* estimate the appropriate size for name hash table */
+unsigned int au_rdhash_est(loff_t sz)
+{
+ unsigned int n;
+
+ n = UINT_MAX;
+ sz >>= 10;
+ if (sz < n)
+ n = sz;
+ if (sz < AUFS_RDHASH_DEF)
+ n = AUFS_RDHASH_DEF;
+ /* pr_info("n %u\n", n); */
+ return n;
+}
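+
+/*
+ * e.g. a directory around 4MB gives sz >> 10 == 4096 hash heads, while
+ * anything below AUFS_RDHASH_DEF (in KiB) is rounded up to that default.
+ */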
+
+/*
+ * the allocated memory has to be freed by
+ * au_nhash_wh_free() or au_nhash_de_free().
+ */
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
+{
+ struct hlist_head *head;
+ unsigned int u;
+ size_t sz;
+
+ sz = sizeof(*nhash->nh_head) * num_hash;
+ head = kmalloc(sz, gfp);
+ if (head) {
+ nhash->nh_num = num_hash;
+ nhash->nh_head = head;
+ for (u = 0; u < num_hash; u++)
+ INIT_HLIST_HEAD(head++);
+ return 0; /* success */
+ }
+
+ return -ENOMEM;
+}
+
+static void nhash_count(struct hlist_head *head)
+{
+#if 0
+ unsigned long n;
+ struct hlist_node *pos;
+
+ n = 0;
+ hlist_for_each(pos, head)
+ n++;
+ pr_info("%lu\n", n);
+#endif
+}
+
+static void au_nhash_wh_do_free(struct hlist_head *head)
+{
+ struct au_vdir_wh *pos;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(pos, node, head, wh_hash)
+ au_kfree_rcu(pos);
+}
+
+static void au_nhash_de_do_free(struct hlist_head *head)
+{
+ struct au_vdir_dehstr *pos;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(pos, node, head, hash)
+ au_cache_free_vdir_dehstr(pos);
+}
+
+static void au_nhash_do_free(struct au_nhash *nhash,
+ void (*free)(struct hlist_head *head))
+{
+ unsigned int n;
+ struct hlist_head *head;
+
+ n = nhash->nh_num;
+ if (!n)
+ return;
+
+ head = nhash->nh_head;
+ while (n-- > 0) {
+ nhash_count(head);
+ free(head++);
+ }
+ au_kfree_try_rcu(nhash->nh_head);
+}
+
+void au_nhash_wh_free(struct au_nhash *whlist)
+{
+ au_nhash_do_free(whlist, au_nhash_wh_do_free);
+}
+
+static void au_nhash_de_free(struct au_nhash *delist)
+{
+ au_nhash_do_free(delist, au_nhash_de_do_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+ int limit)
+{
+ int num;
+ unsigned int u, n;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+
+ num = 0;
+ n = whlist->nh_num;
+ head = whlist->nh_head;
+ for (u = 0; u < n; u++, head++)
+ hlist_for_each_entry(pos, head, wh_hash)
+ if (pos->wh_bindex == btgt && ++num > limit)
+ return 1;
+ return 0;
+}
+
+static struct hlist_head *au_name_hash(struct au_nhash *nhash,
+ unsigned char *name,
+ unsigned int len)
+{
+ unsigned int v;
+ /* const unsigned int magic_bit = 12; */
+
+ AuDebugOn(!nhash->nh_num || !nhash->nh_head);
+
+ v = 0;
+ if (len > 8)
+ len = 8;
+ while (len--)
+ v += *name++;
+ /* v = hash_long(v, magic_bit); */
+ v %= nhash->nh_num;
+ return nhash->nh_head + v;
+}
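+
+/*
+ * e.g. "foo" with nh_num == 100: the byte sum of at most the first 8 chars
+ * is 102 + 111 + 111 == 324, so the entry hashes to bucket 324 % 100 == 24.
+ */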
+
+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name,
+ int nlen)
+{
+ return str->len == nlen && !memcmp(str->name, name, nlen);
+}
+
+/* returns found or not */
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen)
+{
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+ struct au_vdir_destr *str;
+
+ head = au_name_hash(whlist, name, nlen);
+ hlist_for_each_entry(pos, head, wh_hash) {
+ str = &pos->wh_str;
+ AuDbg("%.*s\n", str->len, str->name);
+ if (au_nhash_test_name(str, name, nlen))
+ return 1;
+ }
+ return 0;
+}
+
+/* returns found(true) or not */
+static int test_known(struct au_nhash *delist, char *name, int nlen)
+{
+ struct hlist_head *head;
+ struct au_vdir_dehstr *pos;
+ struct au_vdir_destr *str;
+
+ head = au_name_hash(delist, name, nlen);
+ hlist_for_each_entry(pos, head, hash) {
+ str = pos->str;
+ AuDbg("%.*s\n", str->len, str->name);
+ if (au_nhash_test_name(str, name, nlen))
+ return 1;
+ }
+ return 0;
+}
+
+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino,
+ unsigned char d_type)
+{
+#ifdef CONFIG_AUFS_SHWH
+ wh->wh_ino = ino;
+ wh->wh_type = d_type;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+ unsigned int d_type, aufs_bindex_t bindex,
+ unsigned char shwh)
+{
+ int err;
+ struct au_vdir_destr *str;
+ struct au_vdir_wh *wh;
+
+ AuDbg("%.*s\n", nlen, name);
+ AuDebugOn(!whlist->nh_num || !whlist->nh_head);
+
+ err = -ENOMEM;
+ wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS);
+ if (unlikely(!wh))
+ goto out;
+
+ err = 0;
+ wh->wh_bindex = bindex;
+ if (shwh)
+ au_shwh_init_wh(wh, ino, d_type);
+ str = &wh->wh_str;
+ str->len = nlen;
+ memcpy(str->name, name, nlen);
+ hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen));
+ /* smp_mb(); */
+
+out:
+ return err;
+}
+
+static int append_deblk(struct au_vdir *vdir)
+{
+ int err;
+ unsigned long ul;
+ const unsigned int deblk_sz = vdir->vd_deblk_sz;
+ union au_vdir_deblk_p p, deblk_end;
+ unsigned char **o;
+
+ err = -ENOMEM;
+ o = au_krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
+ GFP_NOFS, /*may_shrink*/0);
+ if (unlikely(!o))
+ goto out;
+
+ vdir->vd_deblk = o;
+ p.deblk = kmalloc(deblk_sz, GFP_NOFS);
+ if (p.deblk) {
+ ul = vdir->vd_nblk++;
+ vdir->vd_deblk[ul] = p.deblk;
+ vdir->vd_last.ul = ul;
+ vdir->vd_last.p.deblk = p.deblk;
+ deblk_end.deblk = p.deblk + deblk_sz;
+ err = set_deblk_end(&p, &deblk_end);
+ }
+
+out:
+ return err;
+}
+
+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino,
+ unsigned int d_type, struct au_nhash *delist)
+{
+ int err;
+ unsigned int sz;
+ const unsigned int deblk_sz = vdir->vd_deblk_sz;
+ union au_vdir_deblk_p p, *room, deblk_end;
+ struct au_vdir_dehstr *dehstr;
+
+ p.deblk = last_deblk(vdir);
+ deblk_end.deblk = p.deblk + deblk_sz;
+ room = &vdir->vd_last.p;
+ AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk
+ || !is_deblk_end(room, &deblk_end));
+
+ sz = calc_size(nlen);
+ if (unlikely(sz > deblk_end.deblk - room->deblk)) {
+ err = append_deblk(vdir);
+ if (unlikely(err))
+ goto out;
+
+ p.deblk = last_deblk(vdir);
+ deblk_end.deblk = p.deblk + deblk_sz;
+ /* smp_mb(); */
+ AuDebugOn(room->deblk != p.deblk);
+ }
+
+ err = -ENOMEM;
+ dehstr = au_cache_alloc_vdir_dehstr();
+ if (unlikely(!dehstr))
+ goto out;
+
+ dehstr->str = &room->de->de_str;
+ hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen));
+ room->de->de_ino = ino;
+ room->de->de_type = d_type;
+ room->de->de_str.len = nlen;
+ memcpy(room->de->de_str.name, name, nlen);
+
+ err = 0;
+ room->deblk += sz;
+ if (unlikely(set_deblk_end(room, &deblk_end)))
+ err = append_deblk(vdir);
+ /* smp_mb(); */
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_vdir_free(struct au_vdir *vdir)
+{
+ unsigned char **deblk;
+
+ deblk = vdir->vd_deblk;
+ while (vdir->vd_nblk--)
+ au_kfree_try_rcu(*deblk++);
+ au_kfree_try_rcu(vdir->vd_deblk);
+ au_cache_free_vdir(vdir);
+}
+
+static struct au_vdir *alloc_vdir(struct file *file)
+{
+ struct au_vdir *vdir;
+ struct super_block *sb;
+ int err;
+
+ sb = file->f_path.dentry->d_sb;
+ SiMustAnyLock(sb);
+
+ err = -ENOMEM;
+ vdir = au_cache_alloc_vdir();
+ if (unlikely(!vdir))
+ goto out;
+
+ vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS);
+ if (unlikely(!vdir->vd_deblk))
+ goto out_free;
+
+ vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk;
+ if (!vdir->vd_deblk_sz) {
+ /* estimate the appropriate size for deblk */
+ vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL);
+ /* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */
+ }
+ vdir->vd_nblk = 0;
+ vdir->vd_version = 0;
+ vdir->vd_jiffy = 0;
+ err = append_deblk(vdir);
+ if (!err)
+ return vdir; /* success */
+
+ au_kfree_try_rcu(vdir->vd_deblk);
+
+out_free:
+ au_cache_free_vdir(vdir);
+out:
+ vdir = ERR_PTR(err);
+ return vdir;
+}
+
+static int reinit_vdir(struct au_vdir *vdir)
+{
+ int err;
+ union au_vdir_deblk_p p, deblk_end;
+
+ while (vdir->vd_nblk > 1) {
+ au_kfree_try_rcu(vdir->vd_deblk[vdir->vd_nblk - 1]);
+ /* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */
+ vdir->vd_nblk--;
+ }
+ p.deblk = vdir->vd_deblk[0];
+ deblk_end.deblk = p.deblk + vdir->vd_deblk_sz;
+ err = set_deblk_end(&p, &deblk_end);
+ /* keep vd_dblk_sz */
+ vdir->vd_last.ul = 0;
+ vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+ vdir->vd_version = 0;
+ vdir->vd_jiffy = 0;
+ /* smp_mb(); */
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuFillVdir_CALLED 1
+#define AuFillVdir_WHABLE (1 << 1)
+#define AuFillVdir_SHWH (1 << 2)
+#define au_ftest_fillvdir(flags, name) ((flags) & AuFillVdir_##name)
+#define au_fset_fillvdir(flags, name) \
+ do { (flags) |= AuFillVdir_##name; } while (0)
+#define au_fclr_fillvdir(flags, name) \
+ do { (flags) &= ~AuFillVdir_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuFillVdir_SHWH
+#define AuFillVdir_SHWH 0
+#endif
+
+struct fillvdir_arg {
+ struct dir_context ctx;
+ struct file *file;
+ struct au_vdir *vdir;
+ struct au_nhash delist;
+ struct au_nhash whlist;
+ aufs_bindex_t bindex;
+ unsigned int flags;
+ int err;
+};
+
+static int fillvdir(struct dir_context *ctx, const char *__name, int nlen,
+ loff_t offset __maybe_unused, u64 h_ino,
+ unsigned int d_type)
+{
+ struct fillvdir_arg *arg = container_of(ctx, struct fillvdir_arg, ctx);
+ char *name = (void *)__name;
+ struct super_block *sb;
+ ino_t ino;
+ const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH);
+
+ arg->err = 0;
+ sb = arg->file->f_path.dentry->d_sb;
+ au_fset_fillvdir(arg->flags, CALLED);
+ /* smp_mb(); */
+ if (nlen <= AUFS_WH_PFX_LEN
+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+ if (test_known(&arg->delist, name, nlen)
+ || au_nhash_test_known_wh(&arg->whlist, name, nlen))
+ goto out; /* already exists or whiteouted */
+
+ arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino);
+ if (!arg->err) {
+ if (unlikely(nlen > AUFS_MAX_NAMELEN))
+ d_type = DT_UNKNOWN;
+ arg->err = append_de(arg->vdir, name, nlen, ino,
+ d_type, &arg->delist);
+ }
+ } else if (au_ftest_fillvdir(arg->flags, WHABLE)) {
+ name += AUFS_WH_PFX_LEN;
+ nlen -= AUFS_WH_PFX_LEN;
+ if (au_nhash_test_known_wh(&arg->whlist, name, nlen))
+ goto out; /* already whiteouted */
+
+ ino = 0; /* just to suppress a warning */
+ if (shwh)
+ arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type,
+ &ino);
+ if (!arg->err) {
+ if (nlen <= AUFS_MAX_NAMELEN + AUFS_WH_PFX_LEN)
+ d_type = DT_UNKNOWN;
+ arg->err = au_nhash_append_wh
+ (&arg->whlist, name, nlen, ino, d_type,
+ arg->bindex, shwh);
+ }
+ }
+
+out:
+ if (!arg->err)
+ arg->vdir->vd_jiffy = jiffies;
+ /* smp_mb(); */
+ AuTraceErr(arg->err);
+ return arg->err;
+}
+
+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir,
+ struct au_nhash *whlist, struct au_nhash *delist)
+{
+#ifdef CONFIG_AUFS_SHWH
+ int err;
+ unsigned int nh, u;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+ struct hlist_node *n;
+ char *p, *o;
+ struct au_vdir_destr *destr;
+
+ AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH));
+
+ err = -ENOMEM;
+ o = p = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+
+ err = 0;
+ nh = whlist->nh_num;
+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+ p += AUFS_WH_PFX_LEN;
+ for (u = 0; u < nh; u++) {
+ head = whlist->nh_head + u;
+ hlist_for_each_entry_safe(pos, n, head, wh_hash) {
+ destr = &pos->wh_str;
+ memcpy(p, destr->name, destr->len);
+ err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN,
+ pos->wh_ino, pos->wh_type, delist);
+ if (unlikely(err))
+ break;
+ }
+ }
+
+ free_page((unsigned long)o);
+
+out:
+ AuTraceErr(err);
+ return err;
+#else
+ return 0;
+#endif
+}
+
+static int au_do_read_vdir(struct fillvdir_arg *arg)
+{
+ int err;
+ unsigned int rdhash;
+ loff_t offset;
+ aufs_bindex_t bbot, bindex, btop;
+ unsigned char shwh;
+ struct file *hf, *file;
+ struct super_block *sb;
+
+ file = arg->file;
+ sb = file->f_path.dentry->d_sb;
+ SiMustAnyLock(sb);
+
+ rdhash = au_sbi(sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL));
+ err = au_nhash_alloc(&arg->delist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out_delist;
+
+ err = 0;
+ arg->flags = 0;
+ shwh = 0;
+ if (au_opt_test(au_mntflags(sb), SHWH)) {
+ shwh = 1;
+ au_fset_fillvdir(arg->flags, SHWH);
+ }
+ btop = au_fbtop(file);
+ bbot = au_fbbot_dir(file);
+ for (bindex = btop; !err && bindex <= bbot; bindex++) {
+ hf = au_hf_dir(file, bindex);
+ if (!hf)
+ continue;
+
+ offset = vfsub_llseek(hf, 0, SEEK_SET);
+ err = offset;
+ if (unlikely(offset))
+ break;
+
+ arg->bindex = bindex;
+ au_fclr_fillvdir(arg->flags, WHABLE);
+ if (shwh
+ || (bindex != bbot
+ && au_br_whable(au_sbr_perm(sb, bindex))))
+ au_fset_fillvdir(arg->flags, WHABLE);
+ do {
+ arg->err = 0;
+ au_fclr_fillvdir(arg->flags, CALLED);
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(hf, &arg->ctx);
+ if (err >= 0)
+ err = arg->err;
+ } while (!err && au_ftest_fillvdir(arg->flags, CALLED));
+
+ /*
+ * dir_relax() may be good for concurrency, but aufs should not
+ * use it since it will cause a lockdep problem.
+ */
+ }
+
+ if (!err && shwh)
+ err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist);
+
+ au_nhash_wh_free(&arg->whlist);
+
+out_delist:
+ au_nhash_de_free(&arg->delist);
+out:
+ return err;
+}
+
+static int read_vdir(struct file *file, int may_read)
+{
+ int err;
+ unsigned long expire;
+ unsigned char do_read;
+ struct fillvdir_arg arg = {
+ .ctx = {
+ .actor = fillvdir
+ }
+ };
+ struct inode *inode;
+ struct au_vdir *vdir, *allocated;
+
+ err = 0;
+ inode = file_inode(file);
+ IMustLock(inode);
+ IiMustWriteLock(inode);
+ SiMustAnyLock(inode->i_sb);
+
+ allocated = NULL;
+ do_read = 0;
+ expire = au_sbi(inode->i_sb)->si_rdcache;
+ vdir = au_ivdir(inode);
+ if (!vdir) {
+ do_read = 1;
+ vdir = alloc_vdir(file);
+ err = PTR_ERR(vdir);
+ if (IS_ERR(vdir))
+ goto out;
+ err = 0;
+ allocated = vdir;
+ } else if (may_read
+ && (!inode_eq_iversion(inode, vdir->vd_version)
+ || time_after(jiffies, vdir->vd_jiffy + expire))) {
+ do_read = 1;
+ err = reinit_vdir(vdir);
+ if (unlikely(err))
+ goto out;
+ }
+
+ if (!do_read)
+ return 0; /* success */
+
+ arg.file = file;
+ arg.vdir = vdir;
+ err = au_do_read_vdir(&arg);
+ if (!err) {
+ /* file->f_pos = 0; */ /* todo: ctx->pos? */
+ vdir->vd_version = inode_query_iversion(inode);
+ vdir->vd_last.ul = 0;
+ vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+ if (allocated)
+ au_set_ivdir(inode, allocated);
+ } else if (allocated)
+ au_vdir_free(allocated);
+
+out:
+ return err;
+}
+
+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
+{
+ int err, rerr;
+ unsigned long ul, n;
+ const unsigned int deblk_sz = src->vd_deblk_sz;
+
+ AuDebugOn(tgt->vd_nblk != 1);
+
+ err = -ENOMEM;
+ if (tgt->vd_nblk < src->vd_nblk) {
+ unsigned char **p;
+
+ p = au_krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
+ GFP_NOFS, /*may_shrink*/0);
+ if (unlikely(!p))
+ goto out;
+ tgt->vd_deblk = p;
+ }
+
+ if (tgt->vd_deblk_sz != deblk_sz) {
+ unsigned char *p;
+
+ tgt->vd_deblk_sz = deblk_sz;
+ p = au_krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS,
+ /*may_shrink*/1);
+ if (unlikely(!p))
+ goto out;
+ tgt->vd_deblk[0] = p;
+ }
+ memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz);
+ tgt->vd_version = src->vd_version;
+ tgt->vd_jiffy = src->vd_jiffy;
+
+ n = src->vd_nblk;
+ for (ul = 1; ul < n; ul++) {
+ tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz,
+ GFP_NOFS);
+ if (unlikely(!tgt->vd_deblk[ul]))
+ goto out;
+ tgt->vd_nblk++;
+ }
+ tgt->vd_nblk = n;
+	tgt->vd_last.ul = src->vd_last.ul;
+ tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul];
+ tgt->vd_last.p.deblk += src->vd_last.p.deblk
+ - src->vd_deblk[src->vd_last.ul];
+ /* smp_mb(); */
+ return 0; /* success */
+
+out:
+ rerr = reinit_vdir(tgt);
+ BUG_ON(rerr);
+ return err;
+}
+
+int au_vdir_init(struct file *file)
+{
+ int err;
+ struct inode *inode;
+ struct au_vdir *vdir_cache, *allocated;
+
+ /* test file->f_pos here instead of ctx->pos */
+ err = read_vdir(file, !file->f_pos);
+ if (unlikely(err))
+ goto out;
+
+ allocated = NULL;
+ vdir_cache = au_fvdir_cache(file);
+ if (!vdir_cache) {
+ vdir_cache = alloc_vdir(file);
+ err = PTR_ERR(vdir_cache);
+ if (IS_ERR(vdir_cache))
+ goto out;
+ allocated = vdir_cache;
+ } else if (!file->f_pos && vdir_cache->vd_version != file->f_version) {
+ /* test file->f_pos here instead of ctx->pos */
+ err = reinit_vdir(vdir_cache);
+ if (unlikely(err))
+ goto out;
+ } else
+ return 0; /* success */
+
+ inode = file_inode(file);
+ err = copy_vdir(vdir_cache, au_ivdir(inode));
+ if (!err) {
+ file->f_version = inode_query_iversion(inode);
+ if (allocated)
+ au_set_fvdir_cache(file, allocated);
+ } else if (allocated)
+ au_vdir_free(allocated);
+
+out:
+ return err;
+}
+
+static loff_t calc_offset(struct au_vdir *vdir)
+{
+ loff_t offset;
+ union au_vdir_deblk_p p;
+
+ p.deblk = vdir->vd_deblk[vdir->vd_last.ul];
+ offset = vdir->vd_last.p.deblk - p.deblk;
+ offset += vdir->vd_deblk_sz * vdir->vd_last.ul;
+ return offset;
+}
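+
+/*
+ * e.g. with vd_deblk_sz == 512, vd_last.ul == 2 and vd_last.p pointing 40
+ * bytes into that deblk, the resulting directory offset is 2 * 512 + 40
+ * == 1064.
+ */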
+
+/* returns true or false */
+static int seek_vdir(struct file *file, struct dir_context *ctx)
+{
+ int valid;
+ unsigned int deblk_sz;
+ unsigned long ul, n;
+ loff_t offset;
+ union au_vdir_deblk_p p, deblk_end;
+ struct au_vdir *vdir_cache;
+
+ valid = 1;
+ vdir_cache = au_fvdir_cache(file);
+ offset = calc_offset(vdir_cache);
+ AuDbg("offset %lld\n", offset);
+ if (ctx->pos == offset)
+ goto out;
+
+ vdir_cache->vd_last.ul = 0;
+ vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0];
+ if (!ctx->pos)
+ goto out;
+
+ valid = 0;
+ deblk_sz = vdir_cache->vd_deblk_sz;
+ ul = div64_u64(ctx->pos, deblk_sz);
+ AuDbg("ul %lu\n", ul);
+ if (ul >= vdir_cache->vd_nblk)
+ goto out;
+
+ n = vdir_cache->vd_nblk;
+ for (; ul < n; ul++) {
+ p.deblk = vdir_cache->vd_deblk[ul];
+ deblk_end.deblk = p.deblk + deblk_sz;
+ offset = ul;
+ offset *= deblk_sz;
+ while (!is_deblk_end(&p, &deblk_end) && offset < ctx->pos) {
+ unsigned int l;
+
+ l = calc_size(p.de->de_str.len);
+ offset += l;
+ p.deblk += l;
+ }
+ if (!is_deblk_end(&p, &deblk_end)) {
+ valid = 1;
+ vdir_cache->vd_last.ul = ul;
+ vdir_cache->vd_last.p = p;
+ break;
+ }
+ }
+
+out:
+ /* smp_mb(); */
+ if (!valid)
+ AuDbg("valid %d\n", !valid);
+ return valid;
+}
+
+int au_vdir_fill_de(struct file *file, struct dir_context *ctx)
+{
+ unsigned int l, deblk_sz;
+ union au_vdir_deblk_p deblk_end;
+ struct au_vdir *vdir_cache;
+ struct au_vdir_de *de;
+
+ if (!seek_vdir(file, ctx))
+ return 0;
+
+ vdir_cache = au_fvdir_cache(file);
+ deblk_sz = vdir_cache->vd_deblk_sz;
+ while (1) {
+ deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+ deblk_end.deblk += deblk_sz;
+ while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) {
+ de = vdir_cache->vd_last.p.de;
+ AuDbg("%.*s, off%lld, i%lu, dt%d\n",
+ de->de_str.len, de->de_str.name, ctx->pos,
+ (unsigned long)de->de_ino, de->de_type);
+ if (unlikely(!dir_emit(ctx, de->de_str.name,
+ de->de_str.len, de->de_ino,
+ de->de_type))) {
+ /* todo: ignore the error caused by udba? */
+ /* return err; */
+ return 0;
+ }
+
+ l = calc_size(de->de_str.len);
+ vdir_cache->vd_last.p.deblk += l;
+ ctx->pos += l;
+ }
+ if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) {
+ vdir_cache->vd_last.ul++;
+ vdir_cache->vd_last.p.deblk
+ = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+ ctx->pos = deblk_sz * vdir_cache->vd_last.ul;
+ continue;
+ }
+ break;
+ }
+
+ /* smp_mb(); */
+ return 0;
+}
diff --git a/fs/aufs/vfsub.c b/fs/aufs/vfsub.c
new file mode 100644
index 000000000000..d6945e22593e
--- /dev/null
+++ b/fs/aufs/vfsub.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#include <linux/mnt_namespace.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/security.h>
+#include <linux/splice.h>
+#include "aufs.h"
+
+#ifdef CONFIG_AUFS_BR_FUSE
+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb)
+{
+ if (!au_test_fuse(h_sb) || !au_userns)
+ return 0;
+
+ return is_current_mnt_ns(mnt) ? 0 : -EACCES;
+}
+#endif
+
+int vfsub_sync_filesystem(struct super_block *h_sb, int wait)
+{
+ int err;
+
+ lockdep_off();
+ down_read(&h_sb->s_umount);
+ err = __sync_filesystem(h_sb, wait);
+ up_read(&h_sb->s_umount);
+ lockdep_on();
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did)
+{
+ int err;
+ struct kstat st;
+ struct super_block *h_sb;
+
+ /* for remote fs, leave work for its getattr or d_revalidate */
+ /* for bad i_attr fs, handle them in aufs_getattr() */
+ /* still some fs may acquire i_mutex. we need to skip them */
+ err = 0;
+ if (!did)
+ did = &err;
+ h_sb = h_path->dentry->d_sb;
+ *did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb));
+ if (*did)
+ err = vfsub_getattr(h_path, &st);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct file *vfsub_dentry_open(struct path *path, int flags)
+{
+ struct file *file;
+
+ file = dentry_open(path, flags /* | __FMODE_NONOTIFY */,
+ current_cred());
+ if (!IS_ERR_OR_NULL(file)
+ && (file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ i_readcount_inc(d_inode(path->dentry));
+
+ return file;
+}
+
+struct file *vfsub_filp_open(const char *path, int oflags, int mode)
+{
+ struct file *file;
+
+ lockdep_off();
+ file = filp_open(path,
+ oflags /* | __FMODE_NONOTIFY */,
+ mode);
+ lockdep_on();
+ if (IS_ERR(file))
+ goto out;
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+
+out:
+ return file;
+}
+
+/*
+ * Ideally this function should call VFS:do_last() in order to keep all its
+ * checks. But it is very hard for aufs to regenerate several VFS internal
+ * structures such as nameidata. This is a second (or third) best approach.
+ * cf. linux/fs/namei.c:do_last(), lookup_open() and atomic_open().
+ */
+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *args)
+{
+ int err;
+ struct au_branch *br = args->br;
+ struct file *file = args->file;
+ /* copied from linux/fs/namei.c:atomic_open() */
+ struct dentry *const DENTRY_NOT_SET = (void *)-1UL;
+
+ IMustLock(dir);
+ AuDebugOn(!dir->i_op->atomic_open);
+
+ err = au_br_test_oflag(args->open_flag, br);
+ if (unlikely(err))
+ goto out;
+
+ au_lcnt_inc(&br->br_nfiles);
+ file->f_path.dentry = DENTRY_NOT_SET;
+ file->f_path.mnt = au_br_mnt(br);
+ AuDbg("%ps\n", dir->i_op->atomic_open);
+ err = dir->i_op->atomic_open(dir, dentry, file, args->open_flag,
+ args->create_mode);
+ if (unlikely(err < 0)) {
+ au_lcnt_dec(&br->br_nfiles);
+ goto out;
+ }
+
+ /* temporary workaround for nfsv4 branch */
+ if (au_test_nfs(dir->i_sb))
+ nfs_mark_for_revalidate(dir);
+
+ if (file->f_mode & FMODE_CREATED)
+ fsnotify_create(dir, dentry);
+ if (!(file->f_mode & FMODE_OPENED)) {
+ au_lcnt_dec(&br->br_nfiles);
+ goto out;
+ }
+
+ /* todo: call VFS:may_open() here */
+ /* todo: ima_file_check() too? */
+ if (!err && (args->open_flag & __FMODE_EXEC))
+ err = deny_write_access(file);
+ if (!err)
+ fsnotify_open(file);
+ else
+ au_lcnt_dec(&br->br_nfiles);
+ /* note that the file is created and still opened */
+
+out:
+ return err;
+}
+
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path)
+{
+ int err;
+
+ err = kern_path(name, flags, path);
+ if (!err && d_is_positive(path->dentry))
+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+struct dentry *vfsub_lookup_one_len_unlocked(const char *name,
+ struct dentry *parent, int len)
+{
+ struct path path = {
+ .mnt = NULL
+ };
+
+ path.dentry = lookup_one_len_unlocked(name, parent, len);
+ if (IS_ERR(path.dentry))
+ goto out;
+ if (d_is_positive(path.dentry))
+ vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+ AuTraceErrPtr(path.dentry);
+ return path.dentry;
+}
+
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+ int len)
+{
+ struct path path = {
+ .mnt = NULL
+ };
+
+ /* VFS checks it too, but by WARN_ON_ONCE() */
+ IMustLock(d_inode(parent));
+
+ path.dentry = lookup_one_len(name, parent, len);
+ if (IS_ERR(path.dentry))
+ goto out;
+ if (d_is_positive(path.dentry))
+ vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+ AuTraceErrPtr(path.dentry);
+ return path.dentry;
+}
+
+void vfsub_call_lkup_one(void *args)
+{
+ struct vfsub_lkup_one_args *a = args;
+ *a->errp = vfsub_lkup_one(a->name, a->parent);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2)
+{
+ struct dentry *d;
+
+ lockdep_off();
+ d = lock_rename(d1, d2);
+ lockdep_on();
+ au_hn_suspend(hdir1);
+ if (hdir1 != hdir2)
+ au_hn_suspend(hdir2);
+
+ return d;
+}
+
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2)
+{
+ au_hn_resume(hdir1);
+ if (hdir1 != hdir2)
+ au_hn_resume(hdir2);
+ lockdep_off();
+ unlock_rename(d1, d2);
+ lockdep_on();
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_create(struct inode *dir, struct path *path, int mode, bool want_excl)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_mknod(path, d, mode, 0);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_create(dir, path->dentry, mode, want_excl);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_symlink(path, d, symname);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_symlink(dir, path->dentry, symname);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_mknod(path, d, mode, new_encode_dev(dev));
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_mknod(dir, path->dentry, mode, dev);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+static int au_test_nlink(struct inode *inode)
+{
+ const unsigned int link_max = UINT_MAX >> 1; /* rough margin */
+
+ if (!au_test_fs_no_limit_nlink(inode->i_sb)
+ || inode->i_nlink < link_max)
+ return 0;
+ return -EMLINK;
+}
+
+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path,
+ struct inode **delegated_inode)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ err = au_test_nlink(d_inode(src_dentry));
+ if (unlikely(err))
+ return err;
+
+ /* we don't call may_linkat() */
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_link(src_dentry, path, d);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_link(src_dentry, dir, path->dentry, delegated_inode);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+		/* fuse has a different memory inode for the same inumber */
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ tmp.dentry = src_dentry;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
+ struct inode *dir, struct path *path,
+ struct inode **delegated_inode, unsigned int flags)
+{
+ int err;
+ struct path tmp = {
+ .mnt = path->mnt
+ };
+ struct dentry *d;
+
+ IMustLock(dir);
+ IMustLock(src_dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ tmp.dentry = src_dentry->d_parent;
+ err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_rename(src_dir, src_dentry, dir, path->dentry,
+ delegated_inode, flags);
+ lockdep_on();
+ if (!err) {
+ int did;
+
+ tmp.dentry = d->d_parent;
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = src_dentry;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ tmp.dentry = src_dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_mkdir(path, d, mode);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_mkdir(dir, path->dentry, mode);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_rmdir(struct inode *dir, struct path *path)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_rmdir(path, d);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_rmdir(dir, path->dentry);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = {
+ .dentry = path->dentry->d_parent,
+ .mnt = path->mnt
+ };
+
+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: support mmap_sem? */
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+
+ lockdep_off();
+ err = vfs_read(file, ubuf, count, ppos);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+/* todo: kernel_read()? */
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ char __user *u;
+ } buf;
+
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = vfsub_read_u(file, buf.u, count, ppos);
+ set_fs(oldfs);
+ return err;
+}
+
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+
+ lockdep_off();
+ err = vfs_write(file, ubuf, count, ppos);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ const char __user *u;
+ } buf;
+
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = vfsub_write_u(file, buf.u, count, ppos);
+ set_fs(oldfs);
+ return err;
+}
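+
+/*
+ * note: the *_k variants above pass a kernel buffer through the __user
+ * interfaces by temporarily widening the address limit with
+ * set_fs(KERNEL_DS) and restoring it afterwards.
+ */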
+
+int vfsub_flush(struct file *file, fl_owner_t id)
+{
+ int err;
+
+ err = 0;
+ if (file->f_op->flush) {
+ if (!au_test_nfs(file->f_path.dentry->d_sb))
+ err = file->f_op->flush(file, id);
+ else {
+ lockdep_off();
+ err = file->f_op->flush(file, id);
+ lockdep_on();
+ }
+ if (!err)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL);
+ /*ignore*/
+ }
+ return err;
+}
+
+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx)
+{
+ int err;
+
+ AuDbg("%pD, ctx{%ps, %llu}\n", file, ctx->actor, ctx->pos);
+
+ lockdep_off();
+ err = iterate_dir(file, ctx);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+
+ return err;
+}
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ long err;
+
+ lockdep_off();
+ err = do_splice_to(in, ppos, pipe, len, flags);
+ lockdep_on();
+ file_accessed(in);
+ if (err >= 0)
+ vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+{
+ long err;
+
+ lockdep_off();
+ err = do_splice_from(pipe, out, ppos, len, flags);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+int vfsub_fsync(struct file *file, struct path *path, int datasync)
+{
+ int err;
+
+ /* file can be NULL */
+ lockdep_off();
+ err = vfs_fsync(file, datasync);
+ lockdep_on();
+ if (!err) {
+ if (!path) {
+ AuDebugOn(!file);
+ path = &file->f_path;
+ }
+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+ }
+ return err;
+}
+
+/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+ struct file *h_file)
+{
+ int err;
+ struct inode *h_inode;
+ struct super_block *h_sb;
+
+ if (!h_file) {
+ err = vfsub_truncate(h_path, length);
+ goto out;
+ }
+
+ h_inode = d_inode(h_path->dentry);
+ h_sb = h_inode->i_sb;
+ lockdep_off();
+ sb_start_write(h_sb);
+ lockdep_on();
+ err = locks_verify_truncate(h_inode, h_file, length);
+ if (!err)
+ err = security_path_truncate(h_path);
+ if (!err) {
+ lockdep_off();
+ err = do_truncate(h_path->dentry, length, attr, h_file);
+ lockdep_on();
+ }
+ lockdep_off();
+ sb_end_write(h_sb);
+ lockdep_on();
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_vfsub_mkdir_args {
+ int *errp;
+ struct inode *dir;
+ struct path *path;
+ int mode;
+};
+
+static void au_call_vfsub_mkdir(void *args)
+{
+ struct au_vfsub_mkdir_args *a = args;
+ *a->errp = vfsub_mkdir(a->dir, a->path, a->mode);
+}
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode)
+{
+ int err, do_sio, wkq_err;
+
+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+ if (!do_sio) {
+ lockdep_off();
+ err = vfsub_mkdir(dir, path, mode);
+ lockdep_on();
+ } else {
+ struct au_vfsub_mkdir_args args = {
+ .errp = &err,
+ .dir = dir,
+ .path = path,
+ .mode = mode
+ };
+ wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
+
+struct au_vfsub_rmdir_args {
+ int *errp;
+ struct inode *dir;
+ struct path *path;
+};
+
+static void au_call_vfsub_rmdir(void *args)
+{
+ struct au_vfsub_rmdir_args *a = args;
+ *a->errp = vfsub_rmdir(a->dir, a->path);
+}
+
+int vfsub_sio_rmdir(struct inode *dir, struct path *path)
+{
+ int err, do_sio, wkq_err;
+
+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+ if (!do_sio) {
+ lockdep_off();
+ err = vfsub_rmdir(dir, path);
+ lockdep_on();
+ } else {
+ struct au_vfsub_rmdir_args args = {
+ .errp = &err,
+ .dir = dir,
+ .path = path
+ };
+ wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct notify_change_args {
+ int *errp;
+ struct path *path;
+ struct iattr *ia;
+ struct inode **delegated_inode;
+};
+
+static void call_notify_change(void *args)
+{
+ struct notify_change_args *a = args;
+ struct inode *h_inode;
+
+ h_inode = d_inode(a->path->dentry);
+ IMustLock(h_inode);
+
+ *a->errp = -EPERM;
+ if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) {
+ lockdep_off();
+ *a->errp = notify_change(a->path->dentry, a->ia,
+ a->delegated_inode);
+ lockdep_on();
+ if (!*a->errp)
+ vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/
+ }
+ AuTraceErr(*a->errp);
+}
+
+int vfsub_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode)
+{
+ int err;
+ struct notify_change_args args = {
+ .errp = &err,
+ .path = path,
+ .ia = ia,
+ .delegated_inode = delegated_inode
+ };
+
+ call_notify_change(&args);
+
+ return err;
+}
+
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode)
+{
+ int err, wkq_err;
+ struct notify_change_args args = {
+ .errp = &err,
+ .path = path,
+ .ia = ia,
+ .delegated_inode = delegated_inode
+ };
+
+ wkq_err = au_wkq_wait(call_notify_change, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct unlink_args {
+ int *errp;
+ struct inode *dir;
+ struct path *path;
+ struct inode **delegated_inode;
+};
+
+static void call_unlink(void *args)
+{
+ struct unlink_args *a = args;
+ struct dentry *d = a->path->dentry;
+ struct inode *h_inode;
+ const int stop_sillyrename = (au_test_nfs(d->d_sb)
+ && au_dcount(d) == 1);
+
+ IMustLock(a->dir);
+
+ a->path->dentry = d->d_parent;
+ *a->errp = security_path_unlink(a->path, d);
+ a->path->dentry = d;
+ if (unlikely(*a->errp))
+ return;
+
+ if (!stop_sillyrename)
+ dget(d);
+ h_inode = NULL;
+ if (d_is_positive(d)) {
+ h_inode = d_inode(d);
+ ihold(h_inode);
+ }
+
+ lockdep_off();
+ *a->errp = vfs_unlink(a->dir, d, a->delegated_inode);
+ lockdep_on();
+ if (!*a->errp) {
+ struct path tmp = {
+ .dentry = d->d_parent,
+ .mnt = a->path->mnt
+ };
+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+ }
+
+ if (!stop_sillyrename)
+ dput(d);
+ if (h_inode)
+ iput(h_inode);
+
+ AuTraceErr(*a->errp);
+}
+
+/*
+ * @dir: must be locked.
+ * @dentry: target dentry.
+ */
+int vfsub_unlink(struct inode *dir, struct path *path,
+ struct inode **delegated_inode, int force)
+{
+ int err;
+ struct unlink_args args = {
+ .errp = &err,
+ .dir = dir,
+ .path = path,
+ .delegated_inode = delegated_inode
+ };
+
+ if (!force)
+ call_unlink(&args);
+ else {
+ int wkq_err;
+
+ wkq_err = au_wkq_wait(call_unlink, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
diff --git a/fs/aufs/vfsub.h b/fs/aufs/vfsub.h
new file mode 100644
index 000000000000..43779266a991
--- /dev/null
+++ b/fs/aufs/vfsub.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#ifndef __AUFS_VFSUB_H__
+#define __AUFS_VFSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/iversion.h>
+#include <linux/mount.h>
+#include <linux/posix_acl.h>
+#include <linux/xattr.h>
+#include "debug.h"
+
+/* copied from linux/fs/internal.h */
+/* todo: BAD approach!! */
+extern void __mnt_drop_write(struct vfsmount *);
+extern struct file *alloc_empty_file(int, const struct cred *);
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for lower inode */
+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */
+/* reduce? gave up. */
+enum {
+ AuLsc_I_Begin = I_MUTEX_PARENT2, /* 5 */
+ AuLsc_I_PARENT, /* lower inode, parent first */
+ AuLsc_I_PARENT2, /* copyup dirs */
+ AuLsc_I_PARENT3, /* copyup wh */
+ AuLsc_I_CHILD,
+ AuLsc_I_CHILD2,
+ AuLsc_I_End
+};
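+
+/*
+ * e.g. a lower child inode is locked by
+ *	inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ * while its lower parent takes AuLsc_I_PARENT, so that lockdep can tell the
+ * nested lower-inode locks apart.
+ */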
+
+/* to make debugging easier, do not turn these into inline functions */
+#define MtxMustLock(mtx) AuDebugOn(!mutex_is_locked(mtx))
+#define IMustLock(i) AuDebugOn(!inode_is_locked(i))
+
+/* ---------------------------------------------------------------------- */
+
+static inline void vfsub_drop_nlink(struct inode *inode)
+{
+ AuDebugOn(!inode->i_nlink);
+ drop_nlink(inode);
+}
+
+static inline void vfsub_dead_dir(struct inode *inode)
+{
+ AuDebugOn(!S_ISDIR(inode->i_mode));
+ inode->i_flags |= S_DEAD;
+ clear_nlink(inode);
+}
+
+static inline int vfsub_native_ro(struct inode *inode)
+{
+ return sb_rdonly(inode->i_sb)
+ || IS_RDONLY(inode)
+ /* || IS_APPEND(inode) */
+ || IS_IMMUTABLE(inode);
+}
+
+#ifdef CONFIG_AUFS_BR_FUSE
+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb);
+#else
+AuStubInt0(vfsub_test_mntns, struct vfsmount *mnt, struct super_block *h_sb);
+#endif
+
+int vfsub_sync_filesystem(struct super_block *h_sb, int wait);
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did);
+struct file *vfsub_dentry_open(struct path *path, int flags);
+struct file *vfsub_filp_open(const char *path, int oflags, int mode);
+struct au_branch;
+struct vfsub_aopen_args {
+ struct file *file;
+ unsigned int open_flag;
+ umode_t create_mode;
+ struct au_branch *br;
+};
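+/*
+ * a rough caller-side sketch (the local variable names are illustrative):
+ *	struct vfsub_aopen_args args = {
+ *		.file		= file,
+ *		.open_flag	= open_flag,
+ *		.create_mode	= create_mode,
+ *		.br		= br
+ *	};
+ *	err = vfsub_atomic_open(dir, dentry, &args);
+ */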
+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *args);
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path);
+
+struct dentry *vfsub_lookup_one_len_unlocked(const char *name,
+ struct dentry *parent, int len);
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+ int len);
+
+struct vfsub_lkup_one_args {
+ struct dentry **errp;
+ struct qstr *name;
+ struct dentry *parent;
+};
+
+static inline struct dentry *vfsub_lkup_one(struct qstr *name,
+ struct dentry *parent)
+{
+ return vfsub_lookup_one_len(name->name, parent, name->len);
+}
+
+void vfsub_call_lkup_one(void *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int vfsub_mnt_want_write(struct vfsmount *mnt)
+{
+ int err;
+
+ lockdep_off();
+ err = mnt_want_write(mnt);
+ lockdep_on();
+ return err;
+}
+
+static inline void vfsub_mnt_drop_write(struct vfsmount *mnt)
+{
+ lockdep_off();
+ mnt_drop_write(mnt);
+ lockdep_on();
+}
+
+#if 0 /* reserved */
+static inline void vfsub_mnt_drop_write_file(struct file *file)
+{
+ lockdep_off();
+ mnt_drop_write_file(file);
+ lockdep_on();
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_hinode;
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2);
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2);
+
+int vfsub_create(struct inode *dir, struct path *path, int mode,
+ bool want_excl);
+int vfsub_symlink(struct inode *dir, struct path *path,
+ const char *symname);
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev);
+int vfsub_link(struct dentry *src_dentry, struct inode *dir,
+ struct path *path, struct inode **delegated_inode);
+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry,
+ struct inode *hdir, struct path *path,
+ struct inode **delegated_inode, unsigned int flags);
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_rmdir(struct inode *dir, struct path *path);
+
+/* ---------------------------------------------------------------------- */
+
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos);
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+ loff_t *ppos);
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+ loff_t *ppos);
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count,
+ loff_t *ppos);
+int vfsub_flush(struct file *file, fl_owner_t id);
+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx);
+
+static inline loff_t vfsub_f_size_read(struct file *file)
+{
+ return i_size_read(file_inode(file));
+}
+
+static inline unsigned int vfsub_file_flags(struct file *file)
+{
+ unsigned int flags;
+
+ spin_lock(&file->f_lock);
+ flags = file->f_flags;
+ spin_unlock(&file->f_lock);
+
+ return flags;
+}
+
+static inline int vfsub_file_execed(struct file *file)
+{
+	/* todo: access f_flags directly */
+ return !!(vfsub_file_flags(file) & __FMODE_EXEC);
+}
+
+#if 0 /* reserved */
+static inline void vfsub_file_accessed(struct file *h_file)
+{
+ file_accessed(h_file);
+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/
+}
+#endif
+
+#if 0 /* reserved */
+static inline void vfsub_touch_atime(struct vfsmount *h_mnt,
+ struct dentry *h_dentry)
+{
+ struct path h_path = {
+ .dentry = h_dentry,
+ .mnt = h_mnt
+ };
+ touch_atime(&h_path);
+ vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
+}
+#endif
+
+static inline int vfsub_update_time(struct inode *h_inode,
+ struct timespec64 *ts, int flags)
+{
+ return update_time(h_inode, ts, flags);
+ /* no vfsub_update_h_iattr() since we don't have struct path */
+}
+
+#ifdef CONFIG_FS_POSIX_ACL
+static inline int vfsub_acl_chmod(struct inode *h_inode, umode_t h_mode)
+{
+ int err;
+
+ err = posix_acl_chmod(h_inode, h_mode);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+#else
+AuStubInt0(vfsub_acl_chmod, struct inode *h_inode, umode_t h_mode);
+#endif
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
+
+static inline long vfsub_truncate(struct path *path, loff_t length)
+{
+ long err;
+
+ lockdep_off();
+ err = vfs_truncate(path, length);
+ lockdep_on();
+ return err;
+}
+
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+ struct file *h_file);
+int vfsub_fsync(struct file *file, struct path *path, int datasync);
+
+/*
+ * re-use the branch fs's ioctl(FICLONE) while aufs itself doesn't support
+ * such an ioctl.
+ */
+static inline loff_t vfsub_clone_file_range(struct file *src, struct file *dst,
+ loff_t len)
+{
+ loff_t err;
+
+ lockdep_off();
+ err = vfs_clone_file_range(src, 0, dst, 0, len, /*remap_flags*/0);
+ lockdep_on();
+
+ return err;
+}
+
+/* copy_file_range(2) is a system call */
+static inline ssize_t vfsub_copy_file_range(struct file *src, loff_t src_pos,
+ struct file *dst, loff_t dst_pos,
+ size_t len, unsigned int flags)
+{
+ ssize_t ssz;
+
+ lockdep_off();
+ ssz = vfs_copy_file_range(src, src_pos, dst, dst_pos, len, flags);
+ lockdep_on();
+
+ return ssz;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin)
+{
+ loff_t err;
+
+ lockdep_off();
+ err = vfs_llseek(file, offset, origin);
+ lockdep_on();
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_sio_rmdir(struct inode *dir, struct path *path);
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode);
+int vfsub_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode);
+int vfsub_unlink(struct inode *dir, struct path *path,
+ struct inode **delegated_inode, int force);
+
+static inline int vfsub_getattr(const struct path *path, struct kstat *st)
+{
+ return vfs_getattr(path, st, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline int vfsub_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ int err;
+
+ lockdep_off();
+ err = vfs_setxattr(dentry, name, value, size, flags);
+ lockdep_on();
+
+ return err;
+}
+
+static inline int vfsub_removexattr(struct dentry *dentry, const char *name)
+{
+ int err;
+
+ lockdep_off();
+ err = vfs_removexattr(dentry, name);
+ lockdep_on();
+
+ return err;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_VFSUB_H__ */
diff --git a/fs/aufs/wbr_policy.c b/fs/aufs/wbr_policy.c
new file mode 100644
index 000000000000..64954145c0ec
--- /dev/null
+++ b/fs/aufs/wbr_policy.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * policies for selecting one among multiple writable branches
+ */
+
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/* subset of cpup_attr() */
+static noinline_for_stack
+int au_cpdown_attr(struct path *h_path, struct dentry *h_src)
+{
+ int err, sbits;
+ struct iattr ia;
+ struct inode *h_isrc;
+
+ h_isrc = d_inode(h_src);
+ ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID;
+ ia.ia_mode = h_isrc->i_mode;
+ ia.ia_uid = h_isrc->i_uid;
+ ia.ia_gid = h_isrc->i_gid;
+ sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID));
+ au_cpup_attr_flags(d_inode(h_path->dentry), h_isrc->i_flags);
+ /* no delegation since it is just created */
+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
+
+ /* is this nfs only? */
+ if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) {
+ ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+ ia.ia_mode = h_isrc->i_mode;
+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
+ }
+
+ return err;
+}
+
+#define AuCpdown_PARENT_OPQ 1
+#define AuCpdown_WHED (1 << 1)
+#define AuCpdown_MADE_DIR (1 << 2)
+#define AuCpdown_DIROPQ (1 << 3)
+#define au_ftest_cpdown(flags, name) ((flags) & AuCpdown_##name)
+#define au_fset_cpdown(flags, name) \
+ do { (flags) |= AuCpdown_##name; } while (0)
+#define au_fclr_cpdown(flags, name) \
+ do { (flags) &= ~AuCpdown_##name; } while (0)
+
+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst,
+ unsigned int *flags)
+{
+ int err;
+ struct dentry *opq_dentry;
+
+ opq_dentry = au_diropq_create(dentry, bdst);
+ err = PTR_ERR(opq_dentry);
+ if (IS_ERR(opq_dentry))
+ goto out;
+ dput(opq_dentry);
+ au_fset_cpdown(*flags, DIROPQ);
+
+out:
+ return err;
+}
+
+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent,
+ struct inode *dir, aufs_bindex_t bdst)
+{
+ int err;
+ struct path h_path;
+ struct au_branch *br;
+
+ br = au_sbr(dentry->d_sb, bdst);
+ h_path.dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry))
+ goto out;
+
+ err = 0;
+ if (d_is_positive(h_path.dentry)) {
+ h_path.mnt = au_br_mnt(br);
+ err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path,
+ dentry);
+ }
+ dput(h_path.dentry);
+
+out:
+ return err;
+}
+
+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent, void *arg)
+{
+ int err, rerr;
+ aufs_bindex_t bopq, btop;
+ struct path h_path;
+ struct dentry *parent;
+ struct inode *h_dir, *h_inode, *inode, *dir;
+ unsigned int *flags = arg;
+
+ btop = au_dbtop(dentry);
+ /* dentry is di-locked */
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ h_dir = d_inode(h_parent);
+ AuDebugOn(h_dir != au_h_iptr(dir, bdst));
+ IMustLock(h_dir);
+
+ err = au_lkup_neg(dentry, bdst, /*wh*/0);
+ if (unlikely(err < 0))
+ goto out;
+ h_path.dentry = au_h_dptr(dentry, bdst);
+ h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst);
+ err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path, 0755);
+ if (unlikely(err))
+ goto out_put;
+ au_fset_cpdown(*flags, MADE_DIR);
+
+ bopq = au_dbdiropq(dentry);
+ au_fclr_cpdown(*flags, WHED);
+ au_fclr_cpdown(*flags, DIROPQ);
+ if (au_dbwh(dentry) == bdst)
+ au_fset_cpdown(*flags, WHED);
+ if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst)
+ au_fset_cpdown(*flags, PARENT_OPQ);
+ h_inode = d_inode(h_path.dentry);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ if (au_ftest_cpdown(*flags, WHED)) {
+ err = au_cpdown_dir_opq(dentry, bdst, flags);
+ if (unlikely(err)) {
+ inode_unlock(h_inode);
+ goto out_dir;
+ }
+ }
+
+ err = au_cpdown_attr(&h_path, au_h_dptr(dentry, btop));
+ inode_unlock(h_inode);
+ if (unlikely(err))
+ goto out_opq;
+
+ if (au_ftest_cpdown(*flags, WHED)) {
+ err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst);
+ if (unlikely(err))
+ goto out_opq;
+ }
+
+ inode = d_inode(dentry);
+ if (au_ibbot(inode) < bdst)
+ au_set_ibbot(inode, bdst);
+ au_set_h_iptr(inode, bdst, au_igrab(h_inode),
+ au_hi_flags(inode, /*isdir*/1));
+ au_fhsm_wrote(dentry->d_sb, bdst, /*force*/0);
+ goto out; /* success */
+
+ /* revert */
+out_opq:
+ if (au_ftest_cpdown(*flags, DIROPQ)) {
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ rerr = au_diropq_remove(dentry, bdst);
+ inode_unlock(h_inode);
+ if (unlikely(rerr)) {
+ AuIOErr("failed removing diropq for %pd b%d (%d)\n",
+ dentry, bdst, rerr);
+ err = -EIO;
+ goto out;
+ }
+ }
+out_dir:
+ if (au_ftest_cpdown(*flags, MADE_DIR)) {
+ rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path);
+ if (unlikely(rerr)) {
+ AuIOErr("failed removing %pd b%d (%d)\n",
+ dentry, bdst, rerr);
+ err = -EIO;
+ }
+ }
+out_put:
+ au_set_h_dptr(dentry, bdst, NULL);
+ if (au_dbbot(dentry) == bdst)
+ au_update_dbbot(dentry);
+out:
+ dput(parent);
+ return err;
+}
+
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+ int err;
+ unsigned int flags;
+
+ flags = 0;
+ err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for create */
+
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ int err, i, j, ndentry;
+ aufs_bindex_t bopq;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries, *parent, *d;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ parent = dget_parent(dentry);
+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0);
+ if (unlikely(err))
+ goto out_free;
+
+ err = bindex;
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++) {
+ d = dentries[j];
+ di_read_lock_parent2(d, !AuLock_IR);
+ bopq = au_dbdiropq(d);
+ di_read_unlock(d, !AuLock_IR);
+ if (bopq >= 0 && bopq < err)
+ err = bopq;
+ }
+ }
+
+out_free:
+ dput(parent);
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex)
+{
+ for (; bindex >= 0; bindex--)
+ if (!au_br_rdonly(au_sbr(sb, bindex)))
+ return bindex;
+ return -EROFS;
+}
+
+/* top down parent */
+static int au_wbr_create_tdp(struct dentry *dentry,
+ unsigned int flags __maybe_unused)
+{
+ int err;
+ aufs_bindex_t btop, bindex;
+ struct super_block *sb;
+ struct dentry *parent, *h_parent;
+
+ sb = dentry->d_sb;
+ btop = au_dbtop(dentry);
+ err = btop;
+ if (!au_br_rdonly(au_sbr(sb, btop)))
+ goto out;
+
+ err = -EROFS;
+ parent = dget_parent(dentry);
+ for (bindex = au_dbtop(parent); bindex < btop; bindex++) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+
+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
+ err = bindex;
+ break;
+ }
+ }
+ dput(parent);
+
+ /* bottom up here */
+ if (unlikely(err < 0)) {
+ err = au_wbr_bu(sb, btop - 1);
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+ }
+
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* an exception for the policy other than tdp */
+static int au_wbr_create_exp(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bwh, bdiropq;
+ struct dentry *parent;
+
+ err = -1;
+ bwh = au_dbwh(dentry);
+ parent = dget_parent(dentry);
+ bdiropq = au_dbdiropq(parent);
+ if (bwh >= 0) {
+ if (bdiropq >= 0)
+ err = min(bdiropq, bwh);
+ else
+ err = bwh;
+ AuDbg("%d\n", err);
+ } else if (bdiropq >= 0) {
+ err = bdiropq;
+ AuDbg("%d\n", err);
+ }
+ dput(parent);
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+ if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err)))
+ err = -1;
+
+ AuDbg("%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* round robin */
+static int au_wbr_create_init_rr(struct super_block *sb)
+{
+ int err;
+
+ err = au_wbr_bu(sb, au_sbbot(sb));
+ atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */
+ /* smp_mb(); */
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_create_rr(struct dentry *dentry, unsigned int flags)
+{
+ int err, nbr;
+ unsigned int u;
+ aufs_bindex_t bindex, bbot;
+ struct super_block *sb;
+ atomic_t *next;
+
+ err = au_wbr_create_exp(dentry);
+ if (err >= 0)
+ goto out;
+
+ sb = dentry->d_sb;
+ next = &au_sbi(sb)->si_wbr_rr_next;
+ bbot = au_sbbot(sb);
+ nbr = bbot + 1;
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ if (!au_ftest_wbr(flags, DIR)) {
+ err = atomic_dec_return(next) + 1;
+ /* modulo for 0 is meaningless */
+ if (unlikely(!err))
+ err = atomic_dec_return(next) + 1;
+ } else
+ err = atomic_read(next);
+ AuDbg("%d\n", err);
+ u = err;
+ err = u % nbr;
+ AuDbg("%d\n", err);
+ if (!au_br_rdonly(au_sbr(sb, err)))
+ break;
+ err = -EROFS;
+ }
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+out:
+ AuDbg("%d\n", err);
+ return err;
+}
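+
+/*
+ * note: only non-directory creations advance si_wbr_rr_next above; for a
+ * directory the current value is read without decrementing it.
+ */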
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space */
+static void au_mfs(struct dentry *dentry, struct dentry *parent)
+{
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_wbr_mfs *mfs;
+ struct dentry *h_parent;
+ aufs_bindex_t bindex, bbot;
+ int err;
+ unsigned long long b, bavail;
+ struct path h_path;
+ /* reduce the stack usage */
+ struct kstatfs *st;
+
+ st = kmalloc(sizeof(*st), GFP_NOFS);
+ if (unlikely(!st)) {
+ AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
+ return;
+ }
+
+ bavail = 0;
+ sb = dentry->d_sb;
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ MtxMustLock(&mfs->mfs_lock);
+ mfs->mfs_bindex = -EROFS;
+ mfs->mfsrr_bytes = 0;
+ if (!parent) {
+ bindex = 0;
+ bbot = au_sbbot(sb);
+ } else {
+ bindex = au_dbtop(parent);
+ bbot = au_dbtaildir(parent);
+ }
+
+ for (; bindex <= bbot; bindex++) {
+ if (parent) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+ }
+ br = au_sbr(sb, bindex);
+ if (au_br_rdonly(br))
+ continue;
+
+ /* sb->s_root for NFS is unreliable */
+ h_path.mnt = au_br_mnt(br);
+ h_path.dentry = h_path.mnt->mnt_root;
+ err = vfs_statfs(&h_path, st);
+ if (unlikely(err)) {
+ AuWarn1("failed statfs, b%d, %d\n", bindex, err);
+ continue;
+ }
+
+ /* when the available size is equal, select the lower one */
+ BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
+ || sizeof(b) < sizeof(st->f_bsize));
+ b = st->f_bavail * st->f_bsize;
+ br->br_wbr->wbr_bytes = b;
+ if (b >= bavail) {
+ bavail = b;
+ mfs->mfs_bindex = bindex;
+ mfs->mfs_jiffy = jiffies;
+ }
+ }
+
+ mfs->mfsrr_bytes = bavail;
+ AuDbg("b%d\n", mfs->mfs_bindex);
+ au_kfree_rcu(st);
+}
+
+static int au_wbr_create_mfs(struct dentry *dentry, unsigned int flags)
+{
+ int err;
+ struct dentry *parent;
+ struct super_block *sb;
+ struct au_wbr_mfs *mfs;
+
+ err = au_wbr_create_exp(dentry);
+ if (err >= 0)
+ goto out;
+
+ sb = dentry->d_sb;
+ parent = NULL;
+ if (au_ftest_wbr(flags, PARENT))
+ parent = dget_parent(dentry);
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ mutex_lock(&mfs->mfs_lock);
+ if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
+ || mfs->mfs_bindex < 0
+ || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex)))
+ au_mfs(dentry, parent);
+ mutex_unlock(&mfs->mfs_lock);
+ err = mfs->mfs_bindex;
+ dput(parent);
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_create_init_mfs(struct super_block *sb)
+{
+ struct au_wbr_mfs *mfs;
+
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ mutex_init(&mfs->mfs_lock);
+ mfs->mfs_jiffy = 0;
+ mfs->mfs_bindex = -EROFS;
+
+ return 0;
+}
+
+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused)
+{
+ mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock);
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* top down regardless of the parent, and then mfs */
+static int au_wbr_create_tdmfs(struct dentry *dentry,
+ unsigned int flags __maybe_unused)
+{
+ int err;
+ aufs_bindex_t bwh, btail, bindex, bfound, bmfs;
+ unsigned long long watermark;
+ struct super_block *sb;
+ struct au_wbr_mfs *mfs;
+ struct au_branch *br;
+ struct dentry *parent;
+
+ sb = dentry->d_sb;
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ mutex_lock(&mfs->mfs_lock);
+ if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
+ || mfs->mfs_bindex < 0)
+ au_mfs(dentry, /*parent*/NULL);
+ watermark = mfs->mfsrr_watermark;
+ bmfs = mfs->mfs_bindex;
+ mutex_unlock(&mfs->mfs_lock);
+
+ /* another style of au_wbr_create_exp() */
+ bwh = au_dbwh(dentry);
+ parent = dget_parent(dentry);
+ btail = au_dbtaildir(parent);
+ if (bwh >= 0 && bwh < btail)
+ btail = bwh;
+
+ err = au_wbr_nonopq(dentry, btail);
+ if (unlikely(err < 0))
+ goto out;
+ btail = err;
+ bfound = -1;
+ for (bindex = 0; bindex <= btail; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_rdonly(br))
+ continue;
+ if (br->br_wbr->wbr_bytes > watermark) {
+ bfound = bindex;
+ break;
+ }
+ }
+ err = bfound;
+ if (err < 0)
+ err = bmfs;
+
+out:
+ dput(parent);
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space and then round robin */
+static int au_wbr_create_mfsrr(struct dentry *dentry, unsigned int flags)
+{
+ int err;
+ struct au_wbr_mfs *mfs;
+
+ err = au_wbr_create_mfs(dentry, flags);
+ if (err >= 0) {
+ mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs;
+ mutex_lock(&mfs->mfs_lock);
+ if (mfs->mfsrr_bytes < mfs->mfsrr_watermark)
+ err = au_wbr_create_rr(dentry, flags);
+ mutex_unlock(&mfs->mfs_lock);
+ }
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_create_init_mfsrr(struct super_block *sb)
+{
+ int err;
+
+ au_wbr_create_init_mfs(sb); /* ignore */
+ err = au_wbr_create_init_rr(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* top down parent and most free space */
+static int au_wbr_create_pmfs(struct dentry *dentry, unsigned int flags)
+{
+ int err, e2;
+ unsigned long long b;
+ aufs_bindex_t bindex, btop, bbot;
+ struct super_block *sb;
+ struct dentry *parent, *h_parent;
+ struct au_branch *br;
+
+ err = au_wbr_create_tdp(dentry, flags);
+ if (unlikely(err < 0))
+ goto out;
+ parent = dget_parent(dentry);
+ btop = au_dbtop(parent);
+ bbot = au_dbtaildir(parent);
+ if (btop == bbot)
+ goto out_parent; /* success */
+
+ e2 = au_wbr_create_mfs(dentry, flags);
+ if (e2 < 0)
+ goto out_parent; /* success */
+
+	/* when the available size is equal, select the upper one */
+ sb = dentry->d_sb;
+ br = au_sbr(sb, err);
+ b = br->br_wbr->wbr_bytes;
+ AuDbg("b%d, %llu\n", err, b);
+
+ for (bindex = btop; bindex <= bbot; bindex++) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+
+ br = au_sbr(sb, bindex);
+ if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) {
+ b = br->br_wbr->wbr_bytes;
+ err = bindex;
+ AuDbg("b%d, %llu\n", err, b);
+ }
+ }
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+out_parent:
+ dput(parent);
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * - top down parent
+ * - most free space with parent
+ * - most free space round-robin regardless parent
+ */
+static int au_wbr_create_pmfsrr(struct dentry *dentry, unsigned int flags)
+{
+ int err;
+ unsigned long long watermark;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_wbr_mfs *mfs;
+
+ err = au_wbr_create_pmfs(dentry, flags | AuWbr_PARENT);
+ if (unlikely(err < 0))
+ goto out;
+
+ sb = dentry->d_sb;
+ br = au_sbr(sb, err);
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ mutex_lock(&mfs->mfs_lock);
+ watermark = mfs->mfsrr_watermark;
+ mutex_unlock(&mfs->mfs_lock);
+ if (br->br_wbr->wbr_bytes < watermark)
+		/* regardless of the parent dir */
+ err = au_wbr_create_mfsrr(dentry, flags);
+
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for copyup */
+
+/* top down parent */
+static int au_wbr_copyup_tdp(struct dentry *dentry)
+{
+ return au_wbr_create_tdp(dentry, /*flags, anything is ok*/0);
+}
+
+/* bottom up parent */
+static int au_wbr_copyup_bup(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bindex, btop;
+ struct dentry *parent, *h_parent;
+ struct super_block *sb;
+
+ err = -EROFS;
+ sb = dentry->d_sb;
+ parent = dget_parent(dentry);
+ btop = au_dbtop(parent);
+ for (bindex = au_dbtop(dentry); bindex >= btop; bindex--) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+
+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
+ err = bindex;
+ break;
+ }
+ }
+ dput(parent);
+
+ /* bottom up here */
+ if (unlikely(err < 0))
+ err = au_wbr_bu(sb, btop - 1);
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* bottom up */
+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t btop)
+{
+ int err;
+
+ err = au_wbr_bu(dentry->d_sb, btop);
+ AuDbg("b%d\n", err);
+ if (err > btop)
+ err = au_wbr_nonopq(dentry, err);
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_copyup_bu(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t btop;
+
+ btop = au_dbtop(dentry);
+ err = au_wbr_do_copyup_bu(dentry, btop);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = {
+ [AuWbrCopyup_TDP] = {
+ .copyup = au_wbr_copyup_tdp
+ },
+ [AuWbrCopyup_BUP] = {
+ .copyup = au_wbr_copyup_bup
+ },
+ [AuWbrCopyup_BU] = {
+ .copyup = au_wbr_copyup_bu
+ }
+};
+
+struct au_wbr_create_operations au_wbr_create_ops[] = {
+ [AuWbrCreate_TDP] = {
+ .create = au_wbr_create_tdp
+ },
+ [AuWbrCreate_RR] = {
+ .create = au_wbr_create_rr,
+ .init = au_wbr_create_init_rr
+ },
+ [AuWbrCreate_MFS] = {
+ .create = au_wbr_create_mfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_MFSV] = {
+ .create = au_wbr_create_mfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_MFSRR] = {
+ .create = au_wbr_create_mfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_MFSRRV] = {
+ .create = au_wbr_create_mfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_TDMFS] = {
+ .create = au_wbr_create_tdmfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_TDMFSV] = {
+ .create = au_wbr_create_tdmfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFS] = {
+ .create = au_wbr_create_pmfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFSV] = {
+ .create = au_wbr_create_pmfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFSRR] = {
+ .create = au_wbr_create_pmfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFSRRV] = {
+ .create = au_wbr_create_pmfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ }
+};
diff --git a/fs/aufs/whout.c b/fs/aufs/whout.c
new file mode 100644
index 000000000000..6c9c8501562a
--- /dev/null
+++ b/fs/aufs/whout.c
@@ -0,0 +1,1062 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#include "aufs.h"
+
+#define WH_MASK 0444
+
+/*
+ * If a directory contains this file, then it is opaque. We start with the
+ * .wh. flag so that it is blocked by lookup.
+ */
+static struct qstr diropq_name = QSTR_INIT(AUFS_WH_DIROPQ,
+ sizeof(AUFS_WH_DIROPQ) - 1);
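+
+/*
+ * e.g. when a branch directory contains the AUFS_WH_DIROPQ entry (commonly
+ * ".wh..wh..opq"), the entries on the lower branches underneath it are
+ * treated as hidden.
+ */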
+
+/*
+ * generate the whiteout name, which is NOT NULL-terminated.
+ * @wh: whiteout qstr to be filled
+ * @name: the original name
+ * returns zero on success, otherwise an error.
+ * on success, wh->name should be freed by kfree().
+ */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name)
+{
+ char *p;
+
+ if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN))
+ return -ENAMETOOLONG;
+
+ wh->len = name->len + AUFS_WH_PFX_LEN;
+ p = kmalloc(wh->len, GFP_NOFS);
+ wh->name = p;
+ if (p) {
+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+ memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len);
+ /* smp_mb(); */
+ return 0;
+ }
+ return -ENOMEM;
+}
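+
+/*
+ * e.g. for an entry named "foo", the generated whiteout name is
+ * AUFS_WH_PFX "foo", i.e. ".wh.foo" on the branch.
+ */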
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test if the @wh_name exists under @h_parent.
+ * @try_sio specifies whether super-io is necessary.
+ */
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio)
+{
+ int err;
+ struct dentry *wh_dentry;
+
+ if (!try_sio)
+ wh_dentry = vfsub_lkup_one(wh_name, h_parent);
+ else
+ wh_dentry = au_sio_lkup_one(wh_name, h_parent);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry)) {
+ if (err == -ENAMETOOLONG)
+ err = 0;
+ goto out;
+ }
+
+ err = 0;
+ if (d_is_negative(wh_dentry))
+ goto out_wh; /* success */
+
+ err = 1;
+ if (d_is_reg(wh_dentry))
+ goto out_wh; /* success */
+
+ err = -EIO;
+ AuIOErr("%pd Invalid whiteout entry type 0%o.\n",
+ wh_dentry, d_inode(wh_dentry)->i_mode);
+
+out_wh:
+ dput(wh_dentry);
+out:
+ return err;
+}
+
+/*
+ * test whether @h_dentry is marked opaque or not.
+ */
+int au_diropq_test(struct dentry *h_dentry)
+{
+ int err;
+ struct inode *h_dir;
+
+ h_dir = d_inode(h_dentry);
+ err = au_wh_test(h_dentry, &diropq_name,
+ au_test_h_perm_sio(h_dir, MAY_EXEC));
+ return err;
+}
+
+/*
+ * returns a negative dentry whose name is unique and temporary.
+ */
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+ struct qstr *prefix)
+{
+ struct dentry *dentry;
+ int i;
+ char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1],
+ *name, *p;
+ /* strict atomic_t is unnecessary here */
+ static unsigned short cnt;
+ struct qstr qs;
+
+ BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN);
+
+ name = defname;
+ qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1;
+ if (unlikely(prefix->len > DNAME_INLINE_LEN)) {
+ dentry = ERR_PTR(-ENAMETOOLONG);
+ if (unlikely(qs.len > NAME_MAX))
+ goto out;
+ dentry = ERR_PTR(-ENOMEM);
+ name = kmalloc(qs.len + 1, GFP_NOFS);
+ if (unlikely(!name))
+ goto out;
+ }
+
+ /* doubly whiteout-ed */
+ memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2);
+ p = name + AUFS_WH_PFX_LEN * 2;
+ memcpy(p, prefix->name, prefix->len);
+ p += prefix->len;
+ *p++ = '.';
+ AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN);
+
+ qs.name = name;
+ for (i = 0; i < 3; i++) {
+ sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++);
+ dentry = au_sio_lkup_one(&qs, h_parent);
+ if (IS_ERR(dentry) || d_is_negative(dentry))
+ goto out_name;
+ dput(dentry);
+ }
+ /* pr_warn("could not get random name\n"); */
+ dentry = ERR_PTR(-EEXIST);
+ AuDbg("%.*s\n", AuLNPair(&qs));
+ BUG();
+
+out_name:
+ if (name != defname)
+ au_kfree_try_rcu(name);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
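+
+/*
+ * e.g. for a busy entry named "foo", the temporary name generated above has
+ * the form ".wh..wh.foo.<hex>", doubly prefixed so that normal lookup skips
+ * it.
+ */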
+
+/*
+ * rename the @h_dentry on @br to the whiteouted temporary name.
+ */
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br)
+{
+ int err;
+ struct path h_path = {
+ .mnt = au_br_mnt(br)
+ };
+ struct inode *h_dir, *delegated;
+ struct dentry *h_parent;
+
+ h_parent = h_dentry->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+
+ h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry))
+ goto out;
+
+ /* under the same dir, no need to lock_rename() */
+ delegated = NULL;
+ err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path, &delegated,
+ /*flags*/0);
+ AuTraceErr(err);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ dput(h_path.dentry);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * functions for removing a whiteout
+ */
+
+static int do_unlink_wh(struct inode *h_dir, struct path *h_path)
+{
+ int err, force;
+ struct inode *delegated;
+
+ /*
+ * forces superio when the dir has a sticky bit.
+ * this may be a violation of unix fs semantics.
+ */
+ force = (h_dir->i_mode & S_ISVTX)
+ && !uid_eq(current_fsuid(), d_inode(h_path->dentry)->i_uid);
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, h_path, &delegated, force);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ return err;
+}
+
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+ struct dentry *dentry)
+{
+ int err;
+
+ err = do_unlink_wh(h_dir, h_path);
+ if (!err && dentry)
+ au_set_dbwh(dentry, -1);
+
+ return err;
+}
+
+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh,
+ struct au_branch *br)
+{
+ int err;
+ struct path h_path = {
+ .mnt = au_br_mnt(br)
+ };
+
+ err = 0;
+ h_path.dentry = vfsub_lkup_one(wh, h_parent);
+ if (IS_ERR(h_path.dentry))
+ err = PTR_ERR(h_path.dentry);
+ else {
+ if (d_is_reg(h_path.dentry))
+ err = do_unlink_wh(d_inode(h_parent), &h_path);
+ dput(h_path.dentry);
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * initialize/clean whiteout for a branch
+ */
+
+static void au_wh_clean(struct inode *h_dir, struct path *whpath,
+ const int isdir)
+{
+ int err;
+ struct inode *delegated;
+
+ if (d_is_negative(whpath->dentry))
+ return;
+
+ if (isdir)
+ err = vfsub_rmdir(h_dir, whpath);
+ else {
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, whpath, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ }
+ if (unlikely(err))
+ pr_warn("failed removing %pd (%d), ignored.\n",
+ whpath->dentry, err);
+}
+
+static int test_linkable(struct dentry *h_root)
+{
+ struct inode *h_dir = d_inode(h_root);
+
+ if (h_dir->i_op->link)
+ return 0;
+
+ pr_err("%pd (%s) doesn't support link(2), use noplink and rw+nolwh\n",
+ h_root, au_sbtype(h_root->d_sb));
+ return -ENOSYS;
+}
+
+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */
+static int au_whdir(struct inode *h_dir, struct path *path)
+{
+ int err;
+
+ err = -EEXIST;
+ if (d_is_negative(path->dentry)) {
+ int mode = 0700;
+
+ if (au_test_nfs(path->dentry->d_sb))
+ mode |= 0111;
+ err = vfsub_mkdir(h_dir, path, mode);
+ } else if (d_is_dir(path->dentry))
+ err = 0;
+ else
+ pr_err("unknown %pd exists\n", path->dentry);
+
+ return err;
+}
+
+struct au_wh_base {
+ const struct qstr *name;
+ struct dentry *dentry;
+};
+
+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[],
+ struct path *h_path)
+{
+ h_path->dentry = base[AuBrWh_BASE].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/0);
+ h_path->dentry = base[AuBrWh_PLINK].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+ h_path->dentry = base[AuBrWh_ORPH].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+}
+
+/*
+ * returns tri-state,
+ * minus: error, caller should print the message
+ * zero: success
+ * plus: error, caller should NOT print the message
+ */
+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr,
+ int do_plink, struct au_wh_base base[],
+ struct path *h_path)
+{
+ int err;
+ struct inode *h_dir;
+
+ h_dir = d_inode(h_root);
+ h_path->dentry = base[AuBrWh_BASE].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/0);
+ h_path->dentry = base[AuBrWh_PLINK].dentry;
+ if (do_plink) {
+ err = test_linkable(h_root);
+ if (unlikely(err)) {
+ err = 1;
+ goto out;
+ }
+
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+ } else
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+ h_path->dentry = base[AuBrWh_ORPH].dentry;
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+ return err;
+}
+
+/*
+ * for the moment, aufs supports a branch filesystem which does not support
+ * link(2). when testing on FAT, which does not fully support i_op->setattr()
+ * either, copyup failed. eventually such a filesystem will not be usable as
+ * the writable branch.
+ *
+ * returns tri-state, see above.
+ */
+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr,
+ int do_plink, struct au_wh_base base[],
+ struct path *h_path)
+{
+ int err;
+ struct inode *h_dir;
+
+ WbrWhMustWriteLock(wbr);
+
+ err = test_linkable(h_root);
+ if (unlikely(err)) {
+ err = 1;
+ goto out;
+ }
+
+ /*
+ * todo: should this create be done in /sbin/mount.aufs helper?
+ */
+ err = -EEXIST;
+ h_dir = d_inode(h_root);
+ if (d_is_negative(base[AuBrWh_BASE].dentry)) {
+ h_path->dentry = base[AuBrWh_BASE].dentry;
+ err = vfsub_create(h_dir, h_path, WH_MASK, /*want_excl*/true);
+ } else if (d_is_reg(base[AuBrWh_BASE].dentry))
+ err = 0;
+ else
+ pr_err("unknown %pd2 exists\n", base[AuBrWh_BASE].dentry);
+ if (unlikely(err))
+ goto out;
+
+ h_path->dentry = base[AuBrWh_PLINK].dentry;
+ if (do_plink) {
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+ } else
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+ wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry);
+
+ h_path->dentry = base[AuBrWh_ORPH].dentry;
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+ return err;
+}
+
+/*
+ * initialize the whiteout base file/dir for @br.
+ */
+int au_wh_init(struct au_branch *br, struct super_block *sb)
+{
+ int err, i;
+ const unsigned char do_plink
+ = !!au_opt_test(au_mntflags(sb), PLINK);
+ struct inode *h_dir;
+ struct path path = br->br_path;
+ struct dentry *h_root = path.dentry;
+ struct au_wbr *wbr = br->br_wbr;
+ static const struct qstr base_name[] = {
+ [AuBrWh_BASE] = QSTR_INIT(AUFS_BASE_NAME,
+ sizeof(AUFS_BASE_NAME) - 1),
+ [AuBrWh_PLINK] = QSTR_INIT(AUFS_PLINKDIR_NAME,
+ sizeof(AUFS_PLINKDIR_NAME) - 1),
+ [AuBrWh_ORPH] = QSTR_INIT(AUFS_ORPHDIR_NAME,
+ sizeof(AUFS_ORPHDIR_NAME) - 1)
+ };
+ struct au_wh_base base[] = {
+ [AuBrWh_BASE] = {
+ .name = base_name + AuBrWh_BASE,
+ .dentry = NULL
+ },
+ [AuBrWh_PLINK] = {
+ .name = base_name + AuBrWh_PLINK,
+ .dentry = NULL
+ },
+ [AuBrWh_ORPH] = {
+ .name = base_name + AuBrWh_ORPH,
+ .dentry = NULL
+ }
+ };
+
+ if (wbr)
+ WbrWhMustWriteLock(wbr);
+
+ for (i = 0; i < AuBrWh_Last; i++) {
+ /* doubly whiteouted */
+ struct dentry *d;
+
+ d = au_wh_lkup(h_root, (void *)base[i].name, br);
+ err = PTR_ERR(d);
+ if (IS_ERR(d))
+ goto out;
+
+ base[i].dentry = d;
+ AuDebugOn(wbr
+ && wbr->wbr_wh[i]
+ && wbr->wbr_wh[i] != base[i].dentry);
+ }
+
+ if (wbr)
+ for (i = 0; i < AuBrWh_Last; i++) {
+ dput(wbr->wbr_wh[i]);
+ wbr->wbr_wh[i] = NULL;
+ }
+
+ err = 0;
+ if (!au_br_writable(br->br_perm)) {
+ h_dir = d_inode(h_root);
+ au_wh_init_ro(h_dir, base, &path);
+ } else if (!au_br_wh_linkable(br->br_perm)) {
+ err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path);
+ if (err > 0)
+ goto out;
+ else if (err)
+ goto out_err;
+ } else {
+ err = au_wh_init_rw(h_root, wbr, do_plink, base, &path);
+ if (err > 0)
+ goto out;
+ else if (err)
+ goto out_err;
+ }
+ goto out; /* success */
+
+out_err:
+ pr_err("an error(%d) on the writable branch %pd(%s)\n",
+ err, h_root, au_sbtype(h_root->d_sb));
+out:
+ for (i = 0; i < AuBrWh_Last; i++)
+ dput(base[i].dentry);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * whiteouts are usually hard links to a single base file.
+ * when the base's link count reaches its ceiling, we create a new whiteout
+ * base asynchronously.
+ */
+
+struct reinit_br_wh {
+ struct super_block *sb;
+ struct au_branch *br;
+};
+
+static void reinit_br_wh(void *arg)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct path h_path;
+ struct reinit_br_wh *a = arg;
+ struct au_wbr *wbr;
+ struct inode *dir, *delegated;
+ struct dentry *h_root;
+ struct au_hinode *hdir;
+
+ err = 0;
+ wbr = a->br->br_wbr;
+ /* big aufs lock */
+ si_noflush_write_lock(a->sb);
+ if (!au_br_writable(a->br->br_perm))
+ goto out;
+ bindex = au_br_index(a->sb, a->br->br_id);
+ if (unlikely(bindex < 0))
+ goto out;
+
+ di_read_lock_parent(a->sb->s_root, AuLock_IR);
+ dir = d_inode(a->sb->s_root);
+ hdir = au_hi(dir, bindex);
+ h_root = au_h_dptr(a->sb->s_root, bindex);
+ AuDebugOn(h_root != au_br_dentry(a->br));
+
+ au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+ wbr_wh_write_lock(wbr);
+ err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode,
+ h_root, a->br);
+ if (!err) {
+ h_path.dentry = wbr->wbr_whbase;
+ h_path.mnt = au_br_mnt(a->br);
+ delegated = NULL;
+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated,
+ /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ } else {
+ pr_warn("%pd is moved, ignored\n", wbr->wbr_whbase);
+ err = 0;
+ }
+ dput(wbr->wbr_whbase);
+ wbr->wbr_whbase = NULL;
+ if (!err)
+ err = au_wh_init(a->br, a->sb);
+ wbr_wh_write_unlock(wbr);
+ au_hn_inode_unlock(hdir);
+ di_read_unlock(a->sb->s_root, AuLock_IR);
+ if (!err)
+ au_fhsm_wrote(a->sb, bindex, /*force*/0);
+
+out:
+ if (wbr)
+ atomic_dec(&wbr->wbr_wh_running);
+ au_lcnt_dec(&a->br->br_count);
+ si_write_unlock(a->sb);
+ au_nwt_done(&au_sbi(a->sb)->si_nowait);
+ au_kfree_rcu(a);
+ if (unlikely(err))
+ AuIOErr("err %d\n", err);
+}
+
+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br)
+{
+ int do_dec, wkq_err;
+ struct reinit_br_wh *arg;
+
+ do_dec = 1;
+ if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1)
+ goto out;
+
+ /* ignore ENOMEM */
+ arg = kmalloc(sizeof(*arg), GFP_NOFS);
+ if (arg) {
+ /*
+ * dec(wh_running), kfree(arg) and dec(br_count)
+ * in reinit function
+ */
+ arg->sb = sb;
+ arg->br = br;
+ au_lcnt_inc(&br->br_count);
+ wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0);
+ if (unlikely(wkq_err)) {
+ atomic_dec(&br->br_wbr->wbr_wh_running);
+ au_lcnt_dec(&br->br_count);
+ au_kfree_rcu(arg);
+ }
+ do_dec = 0;
+ }
+
+out:
+ if (do_dec)
+ atomic_dec(&br->br_wbr->wbr_wh_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create the whiteout @wh.
+ */
+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex,
+ struct dentry *wh)
+{
+ int err;
+ struct path h_path = {
+ .dentry = wh
+ };
+ struct au_branch *br;
+ struct au_wbr *wbr;
+ struct dentry *h_parent;
+ struct inode *h_dir, *delegated;
+
+ h_parent = wh->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+
+ br = au_sbr(sb, bindex);
+ h_path.mnt = au_br_mnt(br);
+ wbr = br->br_wbr;
+ wbr_wh_read_lock(wbr);
+ if (wbr->wbr_whbase) {
+ delegated = NULL;
+ err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ if (!err || err != -EMLINK)
+ goto out;
+
+ /* link count full. re-initialize br_whbase. */
+ kick_reinit_br_wh(sb, br);
+ }
+
+ /* return this error in this context */
+ err = vfsub_create(h_dir, &h_path, WH_MASK, /*want_excl*/true);
+ if (!err)
+ au_fhsm_wrote(sb, bindex, /*force*/0);
+
+out:
+ wbr_wh_read_unlock(wbr);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create or remove the diropq.
+ */
+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int flags)
+{
+ struct dentry *opq_dentry, *h_dentry;
+ struct super_block *sb;
+ struct au_branch *br;
+ int err;
+
+ sb = dentry->d_sb;
+ br = au_sbr(sb, bindex);
+ h_dentry = au_h_dptr(dentry, bindex);
+ opq_dentry = vfsub_lkup_one(&diropq_name, h_dentry);
+ if (IS_ERR(opq_dentry))
+ goto out;
+
+ if (au_ftest_diropq(flags, CREATE)) {
+ err = link_or_create_wh(sb, bindex, opq_dentry);
+ if (!err) {
+ au_set_dbdiropq(dentry, bindex);
+ goto out; /* success */
+ }
+ } else {
+ struct path tmp = {
+ .dentry = opq_dentry,
+ .mnt = au_br_mnt(br)
+ };
+ err = do_unlink_wh(au_h_iptr(d_inode(dentry), bindex), &tmp);
+ if (!err)
+ au_set_dbdiropq(dentry, -1);
+ }
+ dput(opq_dentry);
+ opq_dentry = ERR_PTR(err);
+
+out:
+ return opq_dentry;
+}
+
+struct do_diropq_args {
+ struct dentry **errp;
+ struct dentry *dentry;
+ aufs_bindex_t bindex;
+ unsigned int flags;
+};
+
+static void call_do_diropq(void *args)
+{
+ struct do_diropq_args *a = args;
+ *a->errp = do_diropq(a->dentry, a->bindex, a->flags);
+}
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int flags)
+{
+ struct dentry *diropq, *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!au_test_h_perm_sio(d_inode(h_dentry), MAY_EXEC | MAY_WRITE))
+ diropq = do_diropq(dentry, bindex, flags);
+ else {
+ int wkq_err;
+ struct do_diropq_args args = {
+ .errp = &diropq,
+ .dentry = dentry,
+ .bindex = bindex,
+ .flags = flags
+ };
+
+ wkq_err = au_wkq_wait(call_do_diropq, &args);
+ if (unlikely(wkq_err))
+ diropq = ERR_PTR(wkq_err);
+ }
+
+ return diropq;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * lookup whiteout dentry.
+ * @h_parent: lower parent dentry which must exist and be locked
+ * @base_name: name of dentry which will be whiteouted
+ * returns dentry for whiteout.
+ */
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+ struct au_branch *br)
+{
+ int err;
+ struct qstr wh_name;
+ struct dentry *wh_dentry;
+
+ err = au_wh_name_alloc(&wh_name, base_name);
+ wh_dentry = ERR_PTR(err);
+ if (!err) {
+ wh_dentry = vfsub_lkup_one(&wh_name, h_parent);
+ au_kfree_try_rcu(wh_name.name);
+ }
+ return wh_dentry;
+}
+
+/*
+ * link/create a whiteout for @dentry on @bindex.
+ */
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent)
+{
+ struct dentry *wh_dentry;
+ struct super_block *sb;
+ int err;
+
+ sb = dentry->d_sb;
+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex));
+ if (!IS_ERR(wh_dentry) && d_is_negative(wh_dentry)) {
+ err = link_or_create_wh(sb, bindex, wh_dentry);
+ if (!err) {
+ au_set_dbwh(dentry, bindex);
+ au_fhsm_wrote(sb, bindex, /*force*/0);
+ } else {
+ dput(wh_dentry);
+ wh_dentry = ERR_PTR(err);
+ }
+ }
+
+ return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Delete all whiteouts in this directory on branch bindex. */
+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist,
+ aufs_bindex_t bindex, struct au_branch *br)
+{
+ int err;
+ unsigned long ul, n;
+ struct qstr wh_name;
+ char *p;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+ struct au_vdir_destr *str;
+
+ err = -ENOMEM;
+ p = (void *)__get_free_page(GFP_NOFS);
+ wh_name.name = p;
+ if (unlikely(!wh_name.name))
+ goto out;
+
+ err = 0;
+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+ p += AUFS_WH_PFX_LEN;
+ n = whlist->nh_num;
+ head = whlist->nh_head;
+ for (ul = 0; !err && ul < n; ul++, head++) {
+ hlist_for_each_entry(pos, head, wh_hash) {
+ if (pos->wh_bindex != bindex)
+ continue;
+
+ str = &pos->wh_str;
+ if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) {
+ memcpy(p, str->name, str->len);
+ wh_name.len = AUFS_WH_PFX_LEN + str->len;
+ err = unlink_wh_name(h_dentry, &wh_name, br);
+ if (!err)
+ continue;
+ break;
+ }
+ AuIOErr("whiteout name too long %.*s\n",
+ str->len, str->name);
+ err = -EIO;
+ break;
+ }
+ }
+ free_page((unsigned long)wh_name.name);
+
+out:
+ return err;
+}
+
+struct del_wh_children_args {
+ int *errp;
+ struct dentry *h_dentry;
+ struct au_nhash *whlist;
+ aufs_bindex_t bindex;
+ struct au_branch *br;
+};
+
+static void call_del_wh_children(void *args)
+{
+ struct del_wh_children_args *a = args;
+ *a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp)
+{
+ struct au_whtmp_rmdir *whtmp;
+ int err;
+ unsigned int rdhash;
+
+ SiMustAnyLock(sb);
+
+ whtmp = kzalloc(sizeof(*whtmp), gfp);
+ if (unlikely(!whtmp)) {
+ whtmp = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ /* no estimation for dir size */
+ rdhash = au_sbi(sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = AUFS_RDHASH_DEF;
+ err = au_nhash_alloc(&whtmp->whlist, rdhash, gfp);
+ if (unlikely(err)) {
+ au_kfree_rcu(whtmp);
+ whtmp = ERR_PTR(err);
+ }
+
+out:
+ return whtmp;
+}
+
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp)
+{
+ if (whtmp->br)
+ au_lcnt_dec(&whtmp->br->br_count);
+ dput(whtmp->wh_dentry);
+ iput(whtmp->dir);
+ au_nhash_wh_free(&whtmp->whlist);
+ au_kfree_rcu(whtmp);
+}
+
+/*
+ * rmdir the whiteouted temporary named dir @h_dentry.
+ * @whlist: whiteouted children.
+ */
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_nhash *whlist)
+{
+ int err;
+ unsigned int h_nlink;
+ struct path h_tmp;
+ struct inode *wh_inode, *h_dir;
+ struct au_branch *br;
+
+ h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
+ IMustLock(h_dir);
+
+ br = au_sbr(dir->i_sb, bindex);
+ wh_inode = d_inode(wh_dentry);
+ inode_lock_nested(wh_inode, AuLsc_I_CHILD);
+
+ /*
+	 * someone else might have changed some whiteouts while we were
+	 * sleeping, which means this whlist may contain an obsolete entry.
+ */
+ if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE))
+ err = del_wh_children(wh_dentry, whlist, bindex, br);
+ else {
+ int wkq_err;
+ struct del_wh_children_args args = {
+ .errp = &err,
+ .h_dentry = wh_dentry,
+ .whlist = whlist,
+ .bindex = bindex,
+ .br = br
+ };
+
+ wkq_err = au_wkq_wait(call_del_wh_children, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+ inode_unlock(wh_inode);
+
+ if (!err) {
+ h_tmp.dentry = wh_dentry;
+ h_tmp.mnt = au_br_mnt(br);
+ h_nlink = h_dir->i_nlink;
+ err = vfsub_rmdir(h_dir, &h_tmp);
+		/* some filesystems don't change the parent nlink in some cases */
+ h_nlink -= h_dir->i_nlink;
+ }
+
+ if (!err) {
+ if (au_ibtop(dir) == bindex) {
+ /* todo: dir->i_mutex is necessary */
+ au_cpup_attr_timesizes(dir);
+ if (h_nlink)
+ vfsub_drop_nlink(dir);
+ }
+ return 0; /* success */
+ }
+
+ pr_warn("failed removing %pd(%d), ignored\n", wh_dentry, err);
+ return err;
+}
+
+static void call_rmdir_whtmp(void *args)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct au_whtmp_rmdir *a = args;
+ struct super_block *sb;
+ struct dentry *h_parent;
+ struct inode *h_dir;
+ struct au_hinode *hdir;
+
+ /* rmdir by nfsd may cause deadlock with this i_mutex */
+ /* inode_lock(a->dir); */
+ err = -EROFS;
+ sb = a->dir->i_sb;
+ si_read_lock(sb, !AuLock_FLUSH);
+ if (!au_br_writable(a->br->br_perm))
+ goto out;
+ bindex = au_br_index(sb, a->br->br_id);
+ if (unlikely(bindex < 0))
+ goto out;
+
+ err = -EIO;
+ ii_write_lock_parent(a->dir);
+ h_parent = dget_parent(a->wh_dentry);
+ h_dir = d_inode(h_parent);
+ hdir = au_hi(a->dir, bindex);
+ err = vfsub_mnt_want_write(au_br_mnt(a->br));
+ if (unlikely(err))
+ goto out_mnt;
+ au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+ err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent,
+ a->br);
+ if (!err)
+ err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry, &a->whlist);
+ au_hn_inode_unlock(hdir);
+ vfsub_mnt_drop_write(au_br_mnt(a->br));
+
+out_mnt:
+ dput(h_parent);
+ ii_write_unlock(a->dir);
+out:
+ /* inode_unlock(a->dir); */
+ au_whtmp_rmdir_free(a);
+ si_read_unlock(sb);
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ if (unlikely(err))
+ AuIOErr("err %d\n", err);
+}
+
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args)
+{
+ int wkq_err;
+ struct super_block *sb;
+
+ IMustLock(dir);
+
+	/* all post-processing will be done in call_rmdir_whtmp(). */
+ sb = dir->i_sb;
+ args->dir = au_igrab(dir);
+ args->br = au_sbr(sb, bindex);
+ au_lcnt_inc(&args->br->br_count);
+ args->wh_dentry = dget(wh_dentry);
+ wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0);
+ if (unlikely(wkq_err)) {
+ pr_warn("rmdir error %pd (%d), ignored\n", wh_dentry, wkq_err);
+ au_whtmp_rmdir_free(args);
+ }
+}
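Taken together, the deferred removal of a whiteout-ed directory has three steps: allocate the au_whtmp_rmdir bookkeeping, rename the victim to its temporary whiteout name with au_whtmp_ren(), and hand the real rmdir to the workqueue with au_whtmp_kick_rmdir(). A hedged sketch of how a caller might combine them; the real caller lives in the aufs rmdir path (not shown here) and also fills the whlist and holds the necessary locks:

/* sketch only: example_rm_whtmp_dir() is hypothetical; locking, whlist
 * population and the error paths are the real caller's responsibility */
static int example_rm_whtmp_dir(struct inode *dir, aufs_bindex_t bindex,
				struct dentry *h_dentry, struct au_branch *br)
{
	int err;
	struct au_whtmp_rmdir *args;

	args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
	if (IS_ERR(args))
		return PTR_ERR(args);

	/* hide the directory right away under its temporary whiteout name */
	err = au_whtmp_ren(h_dentry, br);
	if (unlikely(err)) {
		au_whtmp_rmdir_free(args);
		return err;
	}

	/* the actual rmdir and whiteout cleanup happen asynchronously */
	au_whtmp_kick_rmdir(dir, bindex, h_dentry, args);
	return 0;
}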
diff --git a/fs/aufs/whout.h b/fs/aufs/whout.h
new file mode 100644
index 000000000000..06f69b24e5c1
--- /dev/null
+++ b/fs/aufs/whout.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#ifndef __AUFS_WHOUT_H__
+#define __AUFS_WHOUT_H__
+
+#ifdef __KERNEL__
+
+#include "dir.h"
+
+/* whout.c */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name);
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio);
+int au_diropq_test(struct dentry *h_dentry);
+struct au_branch;
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+ struct qstr *prefix);
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br);
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+ struct dentry *dentry);
+int au_wh_init(struct au_branch *br, struct super_block *sb);
+
+/* diropq flags */
+#define AuDiropq_CREATE 1
+#define au_ftest_diropq(flags, name) ((flags) & AuDiropq_##name)
+#define au_fset_diropq(flags, name) \
+ do { (flags) |= AuDiropq_##name; } while (0)
+#define au_fclr_diropq(flags, name) \
+ do { (flags) &= ~AuDiropq_##name; } while (0)
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int flags);
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+ struct au_branch *br);
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent);
+
+/* real rmdir for the whiteout-ed dir */
+struct au_whtmp_rmdir {
+ struct inode *dir;
+ struct au_branch *br;
+ struct dentry *wh_dentry;
+ struct au_nhash whlist;
+};
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp);
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp);
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_nhash *whlist);
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_diropq_create(struct dentry *dentry,
+ aufs_bindex_t bindex)
+{
+ return au_diropq_sio(dentry, bindex, AuDiropq_CREATE);
+}
+
+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE));
+}
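These two wrappers are the interface most callers use: au_diropq_create() plants the opaque marker on the given branch so that lower entries stop showing through the directory, and au_diropq_remove() deletes the marker again. A hedged sketch of the create side; the hypothetical caller is assumed to hold the usual aufs dentry/inode locks:

/* sketch only: example_make_opaque() is hypothetical */
static int example_make_opaque(struct dentry *dentry, aufs_bindex_t bindex)
{
	struct dentry *opq;

	opq = au_diropq_create(dentry, bindex);
	if (IS_ERR(opq))
		return PTR_ERR(opq);
	dput(opq);	/* the marker stays on the branch; drop our reference */
	return 0;
}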
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WHOUT_H__ */
diff --git a/fs/aufs/wkq.c b/fs/aufs/wkq.c
new file mode 100644
index 000000000000..b0dd8df4e414
--- /dev/null
+++ b/fs/aufs/wkq.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credential scheme
+ */
+
+#include <linux/module.h>
+#include "aufs.h"
+
+/* internal workqueue named AUFS_WKQ_NAME */
+
+static struct workqueue_struct *au_wkq;
+
+struct au_wkinfo {
+ struct work_struct wk;
+ struct kobject *kobj;
+
+ unsigned int flags; /* see wkq.h */
+
+ au_wkq_func_t func;
+ void *args;
+
+#ifdef CONFIG_LOCKDEP
+ int dont_check;
+ struct held_lock **hlock;
+#endif
+
+ struct completion *comp;
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Aufs passes some operations to the workqueue such as the internal copyup.
+ * This scheme looks rather unnatural to the LOCKDEP debugging feature, since
+ * the job run by the workqueue depends on locks acquired in the other task.
+ * When delegating an operation to the workqueue, aufs passes its lockdep
+ * information along, and the job restores that info so as to pretend that it
+ * acquired those locks itself. This is done just to make LOCKDEP work
+ * correctly and as expected.
+ */
+
+#ifndef CONFIG_LOCKDEP
+AuStubInt0(au_wkq_lockdep_alloc, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_free, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_pre, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_post, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_init, struct au_wkinfo *wkinfo);
+#else
+static void au_wkq_lockdep_init(struct au_wkinfo *wkinfo)
+{
+ wkinfo->hlock = NULL;
+ wkinfo->dont_check = 0;
+}
+
+/*
+ * 1: matched
+ * 0: unmatched
+ */
+static int au_wkq_lockdep_test(struct lock_class_key *key, const char *name)
+{
+ static DEFINE_SPINLOCK(spin);
+ static struct {
+ char *name;
+ struct lock_class_key *key;
+ } a[] = {
+ { .name = "&sbinfo->si_rwsem" },
+ { .name = "&finfo->fi_rwsem" },
+ { .name = "&dinfo->di_rwsem" },
+ { .name = "&iinfo->ii_rwsem" }
+ };
+ static int set;
+ int i;
+
+ /* lockless read from 'set.' see below */
+ if (set == ARRAY_SIZE(a)) {
+ for (i = 0; i < ARRAY_SIZE(a); i++)
+ if (a[i].key == key)
+ goto match;
+ goto unmatch;
+ }
+
+ spin_lock(&spin);
+ if (set)
+ for (i = 0; i < ARRAY_SIZE(a); i++)
+ if (a[i].key == key) {
+ spin_unlock(&spin);
+ goto match;
+ }
+ for (i = 0; i < ARRAY_SIZE(a); i++) {
+ if (a[i].key) {
+ if (unlikely(a[i].key == key)) { /* rare but possible */
+ spin_unlock(&spin);
+ goto match;
+ } else
+ continue;
+ }
+ if (strstr(a[i].name, name)) {
+ /*
+ * the order of these three lines is important for the
+ * lockless read above.
+ */
+ a[i].key = key;
+ spin_unlock(&spin);
+ set++;
+ /* AuDbg("%d, %s\n", set, name); */
+ goto match;
+ }
+ }
+ spin_unlock(&spin);
+ goto unmatch;
+
+match:
+ return 1;
+unmatch:
+ return 0;
+}
+
+static int au_wkq_lockdep_alloc(struct au_wkinfo *wkinfo)
+{
+ int err, n;
+ struct task_struct *curr;
+ struct held_lock **hl, *held_locks, *p;
+
+ err = 0;
+ curr = current;
+ wkinfo->dont_check = lockdep_recursing(curr);
+ if (wkinfo->dont_check)
+ goto out;
+ n = curr->lockdep_depth;
+ if (!n)
+ goto out;
+
+ err = -ENOMEM;
+ wkinfo->hlock = kmalloc_array(n + 1, sizeof(*wkinfo->hlock), GFP_NOFS);
+ if (unlikely(!wkinfo->hlock))
+ goto out;
+
+ err = 0;
+#if 0
+ if (0 && au_debug_test()) /* left for debugging */
+ lockdep_print_held_locks(curr);
+#endif
+ held_locks = curr->held_locks;
+ hl = wkinfo->hlock;
+ while (n--) {
+ p = held_locks++;
+ if (au_wkq_lockdep_test(p->instance->key, p->instance->name))
+ *hl++ = p;
+ }
+ *hl = NULL;
+
+out:
+ return err;
+}
+
+static void au_wkq_lockdep_free(struct au_wkinfo *wkinfo)
+{
+ au_kfree_try_rcu(wkinfo->hlock);
+}
+
+static void au_wkq_lockdep_pre(struct au_wkinfo *wkinfo)
+{
+ struct held_lock *p, **hl = wkinfo->hlock;
+ int subclass;
+
+ if (wkinfo->dont_check)
+ lockdep_off();
+ if (!hl)
+ return;
+ while ((p = *hl++)) { /* assignment */
+ subclass = lockdep_hlock_class(p)->subclass;
+ /* AuDbg("%s, %d\n", p->instance->name, subclass); */
+ if (p->read)
+ rwsem_acquire_read(p->instance, subclass, 0,
+ /*p->acquire_ip*/_RET_IP_);
+ else
+ rwsem_acquire(p->instance, subclass, 0,
+ /*p->acquire_ip*/_RET_IP_);
+ }
+}
+
+static void au_wkq_lockdep_post(struct au_wkinfo *wkinfo)
+{
+ struct held_lock *p, **hl = wkinfo->hlock;
+
+ if (wkinfo->dont_check)
+ lockdep_on();
+ if (!hl)
+ return;
+ while ((p = *hl++)) /* assignment */
+ rwsem_release(p->instance, 0, /*p->acquire_ip*/_RET_IP_);
+}
+#endif
+
+static void wkq_func(struct work_struct *wk)
+{
+ struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);
+
+ AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID));
+ AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);
+
+ au_wkq_lockdep_pre(wkinfo);
+ wkinfo->func(wkinfo->args);
+ au_wkq_lockdep_post(wkinfo);
+ if (au_ftest_wkq(wkinfo->flags, WAIT))
+ complete(wkinfo->comp);
+ else {
+ kobject_put(wkinfo->kobj);
+ module_put(THIS_MODULE); /* todo: ?? */
+ au_kfree_rcu(wkinfo);
+ }
+}
+
+/*
+ * Since struct completion is large, try allocating it dynamically.
+ */
+#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
+#define AuWkqCompDeclare(name) struct completion *comp = NULL
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+ *comp = kmalloc(sizeof(**comp), GFP_NOFS);
+ if (*comp) {
+ init_completion(*comp);
+ wkinfo->comp = *comp;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static void au_wkq_comp_free(struct completion *comp)
+{
+ au_kfree_rcu(comp);
+}
+
+#else
+
+/* no braces */
+#define AuWkqCompDeclare(name) \
+ DECLARE_COMPLETION_ONSTACK(_ ## name); \
+ struct completion *comp = &_ ## name
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+ wkinfo->comp = *comp;
+ return 0;
+}
+
+static void au_wkq_comp_free(struct completion *comp __maybe_unused)
+{
+ /* empty */
+}
+#endif /* 4KSTACKS */
+
+static void au_wkq_run(struct au_wkinfo *wkinfo)
+{
+ if (au_ftest_wkq(wkinfo->flags, NEST)) {
+ if (au_wkq_test()) {
+ AuWarn1("wkq from wkq, unless silly-rename on NFS,"
+ " due to a dead dir by UDBA,"
+ " or async xino write?\n");
+ AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
+ }
+ } else
+ au_dbg_verify_kthread();
+
+ if (au_ftest_wkq(wkinfo->flags, WAIT)) {
+ INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
+ queue_work(au_wkq, &wkinfo->wk);
+ } else {
+ INIT_WORK(&wkinfo->wk, wkq_func);
+ schedule_work(&wkinfo->wk);
+ }
+}
+
+/*
+ * Be careful: it is easy to cause a deadlock.
+ * processA: lock, wkq and wait
+ * processB: wkq and wait, lock in wkq
+ * --> deadlock
+ */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
+{
+ int err;
+ AuWkqCompDeclare(comp);
+ struct au_wkinfo wkinfo = {
+ .flags = flags,
+ .func = func,
+ .args = args
+ };
+
+ err = au_wkq_comp_alloc(&wkinfo, &comp);
+ if (unlikely(err))
+ goto out;
+ err = au_wkq_lockdep_alloc(&wkinfo);
+ if (unlikely(err))
+ goto out_comp;
+ if (!err) {
+ au_wkq_run(&wkinfo);
+ /* no timeout, no interrupt */
+ wait_for_completion(wkinfo.comp);
+ }
+ au_wkq_lockdep_free(&wkinfo);
+
+out_comp:
+ au_wkq_comp_free(comp);
+out:
+ destroy_work_on_stack(&wkinfo.wk);
+ return err;
+}
+
+/*
+ * Note: dget/dput() on aufs dentries inside func is not supported; it would
+ * be a problem during a concurrent umount.
+ */
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+ unsigned int flags)
+{
+ int err;
+ struct au_wkinfo *wkinfo;
+
+ atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
+
+ /*
+ * wkq_func() must free this wkinfo.
+	 * this highly depends upon the implementation of the workqueue.
+ */
+ err = 0;
+ wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
+ if (wkinfo) {
+ wkinfo->kobj = &au_sbi(sb)->si_kobj;
+ wkinfo->flags = flags & ~AuWkq_WAIT;
+ wkinfo->func = func;
+ wkinfo->args = args;
+ wkinfo->comp = NULL;
+ au_wkq_lockdep_init(wkinfo);
+ kobject_get(wkinfo->kobj);
+ __module_get(THIS_MODULE); /* todo: ?? */
+
+ au_wkq_run(wkinfo);
+ } else {
+ err = -ENOMEM;
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ }
+
+ return err;
+}
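The fire-and-forget path follows a fixed contract, visible in reinit_br_wh() and call_rmdir_whtmp() in whout.c: au_wkq_nowait() bumps si_nowait.nw_len, and the job itself must free its argument block and call au_nwt_done() when it finishes so that au_nwt_flush() can drain outstanding work; if the enqueue itself fails, the submitter frees the arguments instead. A hedged sketch of a conforming job; every example_* name is hypothetical:

/* sketch only */
struct example_args {
	struct super_block *sb;
	/* ... whatever the job needs ... */
};

static void example_job(void *_a)
{
	struct example_args *a = _a;
	struct super_block *sb = a->sb;

	/* do the asynchronous work here, taking aufs locks as needed */

	au_kfree_rcu(a);			/* the job owns its args */
	au_nwt_done(&au_sbi(sb)->si_nowait);	/* pairs with au_wkq_nowait() */
}

static int example_kick(struct super_block *sb)
{
	int err;
	struct example_args *a;

	a = kmalloc(sizeof(*a), GFP_NOFS);
	if (unlikely(!a))
		return -ENOMEM;
	a->sb = sb;
	err = au_wkq_nowait(example_job, a, sb, /*flags*/0);
	if (unlikely(err))
		au_kfree_rcu(a);	/* the job never ran; free args here */
	return err;
}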
+
+/* ---------------------------------------------------------------------- */
+
+void au_nwt_init(struct au_nowait_tasks *nwt)
+{
+ atomic_set(&nwt->nw_len, 0);
+ /* smp_mb(); */ /* atomic_set */
+ init_waitqueue_head(&nwt->nw_wq);
+}
+
+void au_wkq_fin(void)
+{
+ destroy_workqueue(au_wkq);
+}
+
+int __init au_wkq_init(void)
+{
+ int err;
+
+ err = 0;
+ au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE);
+ if (IS_ERR(au_wkq))
+ err = PTR_ERR(au_wkq);
+ else if (!au_wkq)
+ err = -ENOMEM;
+
+ return err;
+}
diff --git a/fs/aufs/wkq.h b/fs/aufs/wkq.h
new file mode 100644
index 000000000000..235370d91693
--- /dev/null
+++ b/fs/aufs/wkq.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credentials management scheme
+ */
+
+#ifndef __AUFS_WKQ_H__
+#define __AUFS_WKQ_H__
+
+#ifdef __KERNEL__
+
+#include <linux/wait.h>
+
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * in the next operation, wait for the 'nowait' tasks in the system-wide
+ * workqueue to finish
+ */
+struct au_nowait_tasks {
+ atomic_t nw_len;
+ wait_queue_head_t nw_wq;
+};
+
+/* ---------------------------------------------------------------------- */
+
+typedef void (*au_wkq_func_t)(void *args);
+
+/* wkq flags */
+#define AuWkq_WAIT 1
+#define AuWkq_NEST (1 << 1)
+#define au_ftest_wkq(flags, name) ((flags) & AuWkq_##name)
+#define au_fset_wkq(flags, name) \
+ do { (flags) |= AuWkq_##name; } while (0)
+#define au_fclr_wkq(flags, name) \
+ do { (flags) &= ~AuWkq_##name; } while (0)
+
+/* wkq.c */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args);
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+ unsigned int flags);
+void au_nwt_init(struct au_nowait_tasks *nwt);
+int __init au_wkq_init(void);
+void au_wkq_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int au_wkq_test(void)
+{
+ return current->flags & PF_WQ_WORKER;
+}
+
+static inline int au_wkq_wait(au_wkq_func_t func, void *args)
+{
+ return au_wkq_do_wait(AuWkq_WAIT, func, args);
+}
+
+static inline void au_nwt_done(struct au_nowait_tasks *nwt)
+{
+ if (atomic_dec_and_test(&nwt->nw_len))
+ wake_up_all(&nwt->nw_wq);
+}
+
+static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
+{
+ wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WKQ_H__ */
diff --git a/fs/aufs/xattr.c b/fs/aufs/xattr.c
new file mode 100644
index 000000000000..8e56b15c53b5
--- /dev/null
+++ b/fs/aufs/xattr.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * functions for handling xattrs
+ */
+
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+#include "aufs.h"
+
+static int au_xattr_ignore(int err, char *name, unsigned int ignore_flags)
+{
+ if (!ignore_flags)
+ goto out;
+ switch (err) {
+ case -ENOMEM:
+ case -EDQUOT:
+ goto out;
+ }
+
+ if ((ignore_flags & AuBrAttr_ICEX) == AuBrAttr_ICEX) {
+ err = 0;
+ goto out;
+ }
+
+#define cmp(brattr, prefix) do { \
+ if (!strncmp(name, XATTR_##prefix##_PREFIX, \
+ XATTR_##prefix##_PREFIX_LEN)) { \
+ if (ignore_flags & AuBrAttr_ICEX_##brattr) \
+ err = 0; \
+ goto out; \
+ } \
+ } while (0)
+
+ cmp(SEC, SECURITY);
+ cmp(SYS, SYSTEM);
+ cmp(TR, TRUSTED);
+ cmp(USR, USER);
+#undef cmp
+
+ if (ignore_flags & AuBrAttr_ICEX_OTH)
+ err = 0;
+
+out:
+ return err;
+}
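The ICEX bits work per xattr namespace: each AuBrAttr_ICEX_* flag downgrades a copy-up failure for the matching prefix to success, the combined AuBrAttr_ICEX ignores every namespace, and -ENOMEM/-EDQUOT are never ignored. A few illustrative calls (not real call sites):

/* illustration only; in the kernel these names arrive as non-const buffers */
char usr_name[] = "user.foo", sec_name[] = "security.bar";
int err;

err = au_xattr_ignore(-EOPNOTSUPP, usr_name, AuBrAttr_ICEX_USR);
/* -> 0: failures in the "user." namespace are ignored */
err = au_xattr_ignore(-EOPNOTSUPP, sec_name, AuBrAttr_ICEX_USR);
/* -> -EOPNOTSUPP: "security." is not covered by AuBrAttr_ICEX_USR */
err = au_xattr_ignore(-ENOMEM, usr_name, AuBrAttr_ICEX);
/* -> -ENOMEM: allocation and quota errors are never ignored */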
+
+static const int au_xattr_out_of_list = AuBrAttr_ICEX_OTH << 1;
+
+static int au_do_cpup_xattr(struct dentry *h_dst, struct dentry *h_src,
+ char *name, char **buf, unsigned int ignore_flags,
+ unsigned int verbose)
+{
+ int err;
+ ssize_t ssz;
+ struct inode *h_idst;
+
+ ssz = vfs_getxattr_alloc(h_src, name, buf, 0, GFP_NOFS);
+ err = ssz;
+ if (unlikely(err <= 0)) {
+ if (err == -ENODATA
+ || (err == -EOPNOTSUPP
+ && ((ignore_flags & au_xattr_out_of_list)
+ || (au_test_nfs_noacl(d_inode(h_src))
+ && (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS)
+ || !strcmp(name,
+ XATTR_NAME_POSIX_ACL_DEFAULT))))
+ ))
+ err = 0;
+ if (err && (verbose || au_debug_test()))
+ pr_err("%s, err %d\n", name, err);
+ goto out;
+ }
+
+	/* unlock it temporarily */
+ h_idst = d_inode(h_dst);
+ inode_unlock(h_idst);
+ err = vfsub_setxattr(h_dst, name, *buf, ssz, /*flags*/0);
+ inode_lock_nested(h_idst, AuLsc_I_CHILD2);
+ if (unlikely(err)) {
+ if (verbose || au_debug_test())
+ pr_err("%s, err %d\n", name, err);
+ err = au_xattr_ignore(err, name, ignore_flags);
+ }
+
+out:
+ return err;
+}
+
+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
+ unsigned int verbose)
+{
+ int err, unlocked, acl_access, acl_default;
+ ssize_t ssz;
+ struct inode *h_isrc, *h_idst;
+ char *value, *p, *o, *e;
+
+	/* try to prevent updates to the source inode while we reference it */
+	/* there should be no parent-child relationship between them */
+ h_isrc = d_inode(h_src);
+ h_idst = d_inode(h_dst);
+ inode_unlock(h_idst);
+ inode_lock_shared_nested(h_isrc, AuLsc_I_CHILD);
+ inode_lock_nested(h_idst, AuLsc_I_CHILD2);
+ unlocked = 0;
+
+ /* some filesystems don't list POSIX ACL, for example tmpfs */
+ ssz = vfs_listxattr(h_src, NULL, 0);
+ err = ssz;
+ if (unlikely(err < 0)) {
+ AuTraceErr(err);
+ if (err == -ENODATA
+ || err == -EOPNOTSUPP)
+ err = 0; /* ignore */
+ goto out;
+ }
+
+ err = 0;
+ p = NULL;
+ o = NULL;
+ if (ssz) {
+ err = -ENOMEM;
+ p = kmalloc(ssz, GFP_NOFS);
+ o = p;
+ if (unlikely(!p))
+ goto out;
+ err = vfs_listxattr(h_src, p, ssz);
+ }
+ inode_unlock_shared(h_isrc);
+ unlocked = 1;
+ AuDbg("err %d, ssz %zd\n", err, ssz);
+ if (unlikely(err < 0))
+ goto out_free;
+
+ err = 0;
+ e = p + ssz;
+ value = NULL;
+ acl_access = 0;
+ acl_default = 0;
+ while (!err && p < e) {
+ acl_access |= !strncmp(p, XATTR_NAME_POSIX_ACL_ACCESS,
+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1);
+ acl_default |= !strncmp(p, XATTR_NAME_POSIX_ACL_DEFAULT,
+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)
+ - 1);
+ err = au_do_cpup_xattr(h_dst, h_src, p, &value, ignore_flags,
+ verbose);
+ p += strlen(p) + 1;
+ }
+ AuTraceErr(err);
+ ignore_flags |= au_xattr_out_of_list;
+ if (!err && !acl_access) {
+ err = au_do_cpup_xattr(h_dst, h_src,
+ XATTR_NAME_POSIX_ACL_ACCESS, &value,
+ ignore_flags, verbose);
+ AuTraceErr(err);
+ }
+ if (!err && !acl_default) {
+ err = au_do_cpup_xattr(h_dst, h_src,
+ XATTR_NAME_POSIX_ACL_DEFAULT, &value,
+ ignore_flags, verbose);
+ AuTraceErr(err);
+ }
+
+ au_kfree_try_rcu(value);
+
+out_free:
+ au_kfree_try_rcu(o);
+out:
+ if (!unlocked)
+ inode_unlock_shared(h_isrc);
+ AuTraceErr(err);
+ return err;
+}
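Callers should note the locking expectation of au_cpup_xattr(): d_inode(h_dst) must already be locked at the AuLsc_I_CHILD2 nesting level, since the function drops and retakes that lock around the source listing, and the two POSIX ACL names are copied even when the source filesystem does not list them. A minimal, hedged call site; the real one is in the copy-up code and derives ignore_flags from the branch attributes:

/* sketch only */
err = au_cpup_xattr(h_dst, h_src, AuBrAttr_ICEX, /*verbose*/0);
if (unlikely(err))
	pr_warn("xattr copy-up failed, %d\n", err);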
+
+/* ---------------------------------------------------------------------- */
+
+static int au_smack_reentering(struct super_block *sb)
+{
+#if IS_ENABLED(CONFIG_SECURITY_SMACK)
+ /*
+ * as a part of lookup, smack_d_instantiate() is called, and it calls
+ * i_op->getxattr(). ouch.
+ */
+ return si_pid_test(sb);
+#else
+ return 0;
+#endif
+}
+
+enum {
+ AU_XATTR_LIST,
+ AU_XATTR_GET
+};
+
+struct au_lgxattr {
+ int type;
+ union {
+ struct {
+ char *list;
+ size_t size;
+ } list;
+ struct {
+ const char *name;
+ void *value;
+ size_t size;
+ } get;
+ } u;
+};
+
+static ssize_t au_lgxattr(struct dentry *dentry, struct au_lgxattr *arg)
+{
+ ssize_t err;
+ int reenter;
+ struct path h_path;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ reenter = au_smack_reentering(sb);
+ if (!reenter) {
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out;
+ }
+ err = au_h_path_getattr(dentry, /*force*/1, &h_path, reenter);
+ if (unlikely(err))
+ goto out_si;
+ if (unlikely(!h_path.dentry))
+ /* illegally overlapped or something */
+ goto out_di; /* pretending success */
+
+ /* always topmost entry only */
+ switch (arg->type) {
+ case AU_XATTR_LIST:
+ err = vfs_listxattr(h_path.dentry,
+ arg->u.list.list, arg->u.list.size);
+ break;
+ case AU_XATTR_GET:
+ AuDebugOn(d_is_negative(h_path.dentry));
+ err = vfs_getxattr(h_path.dentry,
+ arg->u.get.name, arg->u.get.value,
+ arg->u.get.size);
+ break;
+ }
+
+out_di:
+ if (!reenter)
+ di_read_unlock(dentry, AuLock_IR);
+out_si:
+ if (!reenter)
+ si_read_unlock(sb);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+ struct au_lgxattr arg = {
+ .type = AU_XATTR_LIST,
+ .u.list = {
+ .list = list,
+ .size = size
+ },
+ };
+
+ return au_lgxattr(dentry, &arg);
+}
+
+static ssize_t au_getxattr(struct dentry *dentry,
+ struct inode *inode __maybe_unused,
+ const char *name, void *value, size_t size)
+{
+ struct au_lgxattr arg = {
+ .type = AU_XATTR_GET,
+ .u.get = {
+ .name = name,
+ .value = value,
+ .size = size
+ },
+ };
+
+ return au_lgxattr(dentry, &arg);
+}
+
+static int au_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
+{
+ struct au_sxattr arg = {
+ .type = AU_XATTR_SET,
+ .u.set = {
+ .name = name,
+ .value = value,
+ .size = size,
+ .flags = flags
+ },
+ };
+
+ return au_sxattr(dentry, inode, &arg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ return au_getxattr(dentry, inode, name, buffer, size);
+}
+
+static int au_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
+{
+ return au_setxattr(dentry, inode, name, value, size, flags);
+}
+
+static const struct xattr_handler au_xattr_handler = {
+ .name = "",
+ .prefix = "",
+ .get = au_xattr_get,
+ .set = au_xattr_set
+};
+
+static const struct xattr_handler *au_xattr_handlers[] = {
+#ifdef CONFIG_FS_POSIX_ACL
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
+#endif
+ &au_xattr_handler, /* must be last */
+ NULL
+};
+
+void au_xattr_init(struct super_block *sb)
+{
+ sb->s_xattr = au_xattr_handlers;
+}
diff --git a/fs/aufs/xino.c b/fs/aufs/xino.c
new file mode 100644
index 000000000000..7733b9a28779
--- /dev/null
+++ b/fs/aufs/xino.c
@@ -0,0 +1,1956 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * external inode number translation table and bitmap
+ *
+ * things to consider
+ * - the lifetime
+ * + au_xino object
+ * + XINO files (xino, xib, xigen)
+ * + dynamic debugfs entries (xiN)
+ * + static debugfs entries (xib, xigen)
+ * + static sysfs entry (xi_path)
+ * - several entry points to handle them.
+ * + mount(2) without xino option (default)
+ * + mount(2) with xino option
+ * + mount(2) with noxino option
+ * + umount(2)
+ * + remount with add/del branches
+ * + remount with xino/noxino options
+ */
+
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+static aufs_bindex_t sbr_find_shared(struct super_block *sb, aufs_bindex_t btop,
+ aufs_bindex_t bbot,
+ struct super_block *h_sb)
+{
+ /* todo: try binary-search if the branches are many */
+ for (; btop <= bbot; btop++)
+ if (h_sb == au_sbr_sb(sb, btop))
+ return btop;
+ return -1;
+}
+
+/*
+ * find another branch which is on the same filesystem as the specified
+ * branch{@btgt}. search up to @bbot.
+ */
+static aufs_bindex_t is_sb_shared(struct super_block *sb, aufs_bindex_t btgt,
+ aufs_bindex_t bbot)
+{
+ aufs_bindex_t bindex;
+ struct super_block *tgt_sb;
+
+ tgt_sb = au_sbr_sb(sb, btgt);
+ bindex = sbr_find_shared(sb, /*btop*/0, btgt - 1, tgt_sb);
+ if (bindex < 0)
+ bindex = sbr_find_shared(sb, btgt + 1, bbot, tgt_sb);
+
+ return bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * suppress unnecessary notify events while creating xino files
+ */
+
+aufs_bindex_t au_xi_root(struct super_block *sb, struct dentry *dentry)
+{
+ aufs_bindex_t bfound, bindex, bbot;
+ struct dentry *parent;
+ struct au_branch *br;
+
+ bfound = -1;
+ parent = dentry->d_parent; /* safe d_parent access */
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_dentry(br) == parent) {
+ bfound = bindex;
+ break;
+ }
+ }
+
+ AuDbg("bfound b%d\n", bfound);
+ return bfound;
+}
+
+struct au_xino_lock_dir {
+ struct au_hinode *hdir;
+ struct dentry *parent;
+ struct inode *dir;
+};
+
+static struct dentry *au_dget_parent_lock(struct dentry *dentry,
+ unsigned int lsc)
+{
+ struct dentry *parent;
+ struct inode *dir;
+
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ inode_lock_nested(dir, lsc);
+#if 0 /* it should not happen */
+ spin_lock(&dentry->d_lock);
+ if (unlikely(dentry->d_parent != parent)) {
+ spin_unlock(&dentry->d_lock);
+ inode_unlock(dir);
+ dput(parent);
+ parent = NULL;
+ goto out;
+ }
+ spin_unlock(&dentry->d_lock);
+
+out:
+#endif
+ return parent;
+}
+
+static void au_xino_lock_dir(struct super_block *sb, struct path *xipath,
+ struct au_xino_lock_dir *ldir)
+{
+ aufs_bindex_t bindex;
+
+ ldir->hdir = NULL;
+ bindex = au_xi_root(sb, xipath->dentry);
+ if (bindex >= 0) {
+ /* rw branch root */
+ ldir->hdir = au_hi(d_inode(sb->s_root), bindex);
+ au_hn_inode_lock_nested(ldir->hdir, AuLsc_I_PARENT);
+ } else {
+ /* other */
+ ldir->parent = au_dget_parent_lock(xipath->dentry,
+ AuLsc_I_PARENT);
+ ldir->dir = d_inode(ldir->parent);
+ }
+}
+
+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir)
+{
+ if (ldir->hdir)
+ au_hn_inode_unlock(ldir->hdir);
+ else {
+ inode_unlock(ldir->dir);
+ dput(ldir->parent);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create and set a new xino file
+ */
+struct file *au_xino_create(struct super_block *sb, char *fpath, int silent)
+{
+ struct file *file;
+ struct dentry *h_parent, *d;
+ struct inode *h_dir, *inode;
+ int err;
+
+ /*
+	 * at mount time, with the xino file at its default path, hnotify is
+	 * disabled, so there are no notify events to ignore.
+	 * when a user specifies the xino path, we cannot get the au_hdir whose
+	 * events should be ignored.
+ */
+ file = vfsub_filp_open(fpath, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+ /* | __FMODE_NONOTIFY */,
+ 0666);
+ if (IS_ERR(file)) {
+ if (!silent)
+ pr_err("open %s(%ld)\n", fpath, PTR_ERR(file));
+ return file;
+ }
+
+	/* keep the file open (and thus alive) while removing its name */
+ err = 0;
+ d = file->f_path.dentry;
+ h_parent = au_dget_parent_lock(d, AuLsc_I_PARENT);
+ /* mnt_want_write() is unnecessary here */
+ h_dir = d_inode(h_parent);
+ inode = file_inode(file);
+ /* no delegation since it is just created */
+ if (inode->i_nlink)
+ err = vfsub_unlink(h_dir, &file->f_path, /*delegated*/NULL,
+ /*force*/0);
+ inode_unlock(h_dir);
+ dput(h_parent);
+ if (unlikely(err)) {
+ if (!silent)
+ pr_err("unlink %s(%d)\n", fpath, err);
+ goto out;
+ }
+
+ err = -EINVAL;
+ if (unlikely(sb == d->d_sb)) {
+ if (!silent)
+ pr_err("%s must be outside\n", fpath);
+ goto out;
+ }
+ if (unlikely(au_test_fs_bad_xino(d->d_sb))) {
+ if (!silent)
+ pr_err("xino doesn't support %s(%s)\n",
+ fpath, au_sbtype(d->d_sb));
+ goto out;
+ }
+ return file; /* success */
+
+out:
+ fput(file);
+ file = ERR_PTR(err);
+ return file;
+}
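The create-then-unlink sequence above is the classic anonymous-file idiom: the xino file keeps its open struct file but loses its name, so nothing on the backing filesystem can see or reuse it, and it disappears automatically on the last fput(). The same idiom in plain userspace C, for illustration only:

/* userspace illustration of the open+unlink idiom used for xino files */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/xino-demo";
	int fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	unlink(path);	/* no name any more, but the fd stays usable */
	if (write(fd, "x", 1) != 1)
		perror("write");
	close(fd);	/* last reference: the inode is released here */
	return 0;
}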
+
+/*
+ * create a new xinofile at the same place/path as @base.
+ */
+struct file *au_xino_create2(struct super_block *sb, struct path *base,
+ struct file *copy_src)
+{
+ struct file *file;
+ struct dentry *dentry, *parent;
+ struct inode *dir, *delegated;
+ struct qstr *name;
+ struct path path;
+ int err, do_unlock;
+ struct au_xino_lock_dir ldir;
+
+ do_unlock = 1;
+ au_xino_lock_dir(sb, base, &ldir);
+ dentry = base->dentry;
+ parent = dentry->d_parent; /* dir inode is locked */
+ dir = d_inode(parent);
+ IMustLock(dir);
+
+ name = &dentry->d_name;
+ path.dentry = vfsub_lookup_one_len(name->name, parent, name->len);
+ if (IS_ERR(path.dentry)) {
+ file = (void *)path.dentry;
+ pr_err("%pd lookup err %ld\n", dentry, PTR_ERR(path.dentry));
+ goto out;
+ }
+
+ /* no need to mnt_want_write() since we call dentry_open() later */
+ err = vfs_create(dir, path.dentry, 0666, NULL);
+ if (unlikely(err)) {
+ file = ERR_PTR(err);
+ pr_err("%pd create err %d\n", dentry, err);
+ goto out_dput;
+ }
+
+ path.mnt = base->mnt;
+ file = vfsub_dentry_open(&path,
+ O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+ /* | __FMODE_NONOTIFY */);
+ if (IS_ERR(file)) {
+ pr_err("%pd open err %ld\n", dentry, PTR_ERR(file));
+ goto out_dput;
+ }
+
+ delegated = NULL;
+ err = vfsub_unlink(dir, &file->f_path, &delegated, /*force*/0);
+ au_xino_unlock_dir(&ldir);
+ do_unlock = 0;
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err)) {
+ pr_err("%pd unlink err %d\n", dentry, err);
+ goto out_fput;
+ }
+
+ if (copy_src) {
+ /* no one can touch copy_src xino */
+ err = au_copy_file(file, copy_src, vfsub_f_size_read(copy_src));
+ if (unlikely(err)) {
+ pr_err("%pd copy err %d\n", dentry, err);
+ goto out_fput;
+ }
+ }
+ goto out_dput; /* success */
+
+out_fput:
+ fput(file);
+ file = ERR_PTR(err);
+out_dput:
+ dput(path.dentry);
+out:
+ if (do_unlock)
+ au_xino_unlock_dir(&ldir);
+ return file;
+}
+
+struct file *au_xino_file1(struct au_xino *xi)
+{
+ struct file *file;
+ unsigned int u, nfile;
+
+ file = NULL;
+ nfile = xi->xi_nfile;
+ for (u = 0; u < nfile; u++) {
+ file = xi->xi_file[u];
+ if (file)
+ break;
+ }
+
+ return file;
+}
+
+static int au_xino_file_set(struct au_xino *xi, int idx, struct file *file)
+{
+ int err;
+ struct file *f;
+ void *p;
+
+ if (file)
+ get_file(file);
+
+ err = 0;
+ f = NULL;
+ if (idx < xi->xi_nfile) {
+ f = xi->xi_file[idx];
+ if (f)
+ fput(f);
+ } else {
+ p = au_kzrealloc(xi->xi_file,
+ sizeof(*xi->xi_file) * xi->xi_nfile,
+ sizeof(*xi->xi_file) * (idx + 1),
+ GFP_NOFS, /*may_shrink*/0);
+ if (p) {
+ MtxMustLock(&xi->xi_mtx);
+ xi->xi_file = p;
+ xi->xi_nfile = idx + 1;
+ } else {
+ err = -ENOMEM;
+ if (file)
+ fput(file);
+ goto out;
+ }
+ }
+ xi->xi_file[idx] = file;
+
+out:
+ return err;
+}
+
+/*
+ * if @xinew->xi is not set, then create a new xigen file.
+ */
+struct file *au_xi_new(struct super_block *sb, struct au_xi_new *xinew)
+{
+ struct file *file;
+ int err;
+
+ SiMustAnyLock(sb);
+
+ file = au_xino_create2(sb, xinew->base, xinew->copy_src);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ pr_err("%s[%d], err %d\n",
+ xinew->xi ? "xino" : "xigen",
+ xinew->idx, err);
+ goto out;
+ }
+
+ if (xinew->xi)
+ err = au_xino_file_set(xinew->xi, xinew->idx, file);
+ else {
+ BUG();
+ /* todo: make xigen file an array */
+ /* err = au_xigen_file_set(sb, xinew->idx, file); */
+ }
+ fput(file);
+ if (unlikely(err))
+ file = ERR_PTR(err);
+
+out:
+ return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * truncate xino files
+ */
+static int au_xino_do_trunc(struct super_block *sb, aufs_bindex_t bindex,
+ int idx, struct kstatfs *st)
+{
+ int err;
+ blkcnt_t blocks;
+ struct file *file, *new_xino;
+ struct au_xi_new xinew = {
+ .idx = idx
+ };
+
+ err = 0;
+ xinew.xi = au_sbr(sb, bindex)->br_xino;
+ file = au_xino_file(xinew.xi, idx);
+ if (!file)
+ goto out;
+
+ xinew.base = &file->f_path;
+ err = vfs_statfs(xinew.base, st);
+ if (unlikely(err)) {
+ AuErr1("statfs err %d, ignored\n", err);
+ err = 0;
+ goto out;
+ }
+
+ blocks = file_inode(file)->i_blocks;
+ pr_info("begin truncating xino(b%d-%d), ib%llu, %llu/%llu free blks\n",
+ bindex, idx, (u64)blocks, st->f_bfree, st->f_blocks);
+
+ xinew.copy_src = file;
+ new_xino = au_xi_new(sb, &xinew);
+ if (IS_ERR(new_xino)) {
+ err = PTR_ERR(new_xino);
+ pr_err("xino(b%d-%d), err %d, ignored\n", bindex, idx, err);
+ goto out;
+ }
+
+ err = vfs_statfs(&new_xino->f_path, st);
+ if (!err)
+ pr_info("end truncating xino(b%d-%d), ib%llu, %llu/%llu free blks\n",
+ bindex, idx, (u64)file_inode(new_xino)->i_blocks,
+ st->f_bfree, st->f_blocks);
+ else {
+ AuErr1("statfs err %d, ignored\n", err);
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex, int idx_begin)
+{
+ int err, i;
+ unsigned long jiffy;
+ aufs_bindex_t bbot;
+ struct kstatfs *st;
+ struct au_branch *br;
+ struct au_xino *xi;
+
+ err = -ENOMEM;
+ st = kmalloc(sizeof(*st), GFP_NOFS);
+ if (unlikely(!st))
+ goto out;
+
+ err = -EINVAL;
+ bbot = au_sbbot(sb);
+ if (unlikely(bindex < 0 || bbot < bindex))
+ goto out_st;
+
+ err = 0;
+ jiffy = jiffies;
+ br = au_sbr(sb, bindex);
+ xi = br->br_xino;
+ for (i = idx_begin; !err && i < xi->xi_nfile; i++)
+ err = au_xino_do_trunc(sb, bindex, i, st);
+ if (!err)
+ au_sbi(sb)->si_xino_jiffy = jiffy;
+
+out_st:
+ au_kfree_rcu(st);
+out:
+ return err;
+}
+
+struct xino_do_trunc_args {
+ struct super_block *sb;
+ struct au_branch *br;
+ int idx;
+};
+
+static void xino_do_trunc(void *_args)
+{
+ struct xino_do_trunc_args *args = _args;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct inode *dir;
+ int err, idx;
+ aufs_bindex_t bindex;
+
+ err = 0;
+ sb = args->sb;
+ dir = d_inode(sb->s_root);
+ br = args->br;
+ idx = args->idx;
+
+ si_noflush_write_lock(sb);
+ ii_read_lock_parent(dir);
+ bindex = au_br_index(sb, br->br_id);
+ err = au_xino_trunc(sb, bindex, idx);
+ ii_read_unlock(dir);
+ if (unlikely(err))
+ pr_warn("err b%d, (%d)\n", bindex, err);
+ atomic_dec(&br->br_xino->xi_truncating);
+ au_lcnt_dec(&br->br_count);
+ si_write_unlock(sb);
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ au_kfree_rcu(args);
+}
+
+/*
+ * returns the index into the xi_file array whose corresponding file needs to
+ * be truncated, or -1 when no truncation is necessary.
+ */
+static int xino_trunc_test(struct super_block *sb, struct au_branch *br)
+{
+ int err;
+ unsigned int u;
+ struct kstatfs st;
+ struct au_sbinfo *sbinfo;
+ struct au_xino *xi;
+ struct file *file;
+
+ /* todo: si_xino_expire and the ratio should be customizable */
+ sbinfo = au_sbi(sb);
+ if (time_before(jiffies,
+ sbinfo->si_xino_jiffy + sbinfo->si_xino_expire))
+ return -1;
+
+ /* truncation border */
+ xi = br->br_xino;
+ for (u = 0; u < xi->xi_nfile; u++) {
+ file = au_xino_file(xi, u);
+ if (!file)
+ continue;
+
+ err = vfs_statfs(&file->f_path, &st);
+ if (unlikely(err)) {
+ AuErr1("statfs err %d, ignored\n", err);
+ return -1;
+ }
+ if (div64_u64(st.f_bfree * 100, st.f_blocks)
+ >= AUFS_XINO_DEF_TRUNC)
+ return u;
+ }
+
+ return -1;
+}
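So a xino blob becomes a truncation candidate only after si_xino_expire has elapsed and the filesystem backing it still reports at least AUFS_XINO_DEF_TRUNC percent of its blocks free. A standalone version of the ratio test; the 80 used below is only an assumed threshold for illustration, not the real macro value:

/* standalone illustration; the real threshold is AUFS_XINO_DEF_TRUNC */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t f_blocks = 1000000, f_bfree = 850000;
	unsigned int pct = (unsigned int)(f_bfree * 100 / f_blocks);

	printf("%u%% free -> %s\n", pct,
	       pct >= 80 ? "truncate this xino blob" : "leave it alone");
	return 0;
}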
+
+static void xino_try_trunc(struct super_block *sb, struct au_branch *br)
+{
+ int idx;
+ struct xino_do_trunc_args *args;
+ int wkq_err;
+
+ idx = xino_trunc_test(sb, br);
+ if (idx < 0)
+ return;
+
+ if (atomic_inc_return(&br->br_xino->xi_truncating) > 1)
+ goto out;
+
+	/* the locks and kfree() are taken care of in xino_do_trunc() */
+ args = kmalloc(sizeof(*args), GFP_NOFS);
+ if (unlikely(!args)) {
+ AuErr1("no memory\n");
+ goto out;
+ }
+
+ au_lcnt_inc(&br->br_count);
+ args->sb = sb;
+ args->br = br;
+ args->idx = idx;
+ wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0);
+ if (!wkq_err)
+ return; /* success */
+
+ pr_err("wkq %d\n", wkq_err);
+ au_lcnt_dec(&br->br_count);
+ au_kfree_rcu(args);
+
+out:
+ atomic_dec(&br->br_xino->xi_truncating);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_xi_calc {
+ int idx;
+ loff_t pos;
+};
+
+static void au_xi_calc(struct super_block *sb, ino_t h_ino,
+ struct au_xi_calc *calc)
+{
+ loff_t maxent;
+
+ maxent = au_xi_maxent(sb);
+ calc->idx = div64_u64_rem(h_ino, maxent, &calc->pos);
+ calc->pos *= sizeof(ino_t);
+}
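Each branch's xino table is split across an array of files, each holding au_xi_maxent() translation slots, and au_xi_calc() maps a host inode number to a (file index, byte offset) pair. The same arithmetic as a standalone program, with 2048 entries per file and an 8-byte ino_t taken as assumptions for the example:

/* standalone illustration of the h_ino -> (idx, pos) split; 2048 entries
 * per file and an 8-byte ino_t are assumptions for the example */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const uint64_t maxent = 2048, sz = sizeof(uint64_t);
	uint64_t h_ino = 2 * maxent + 5;	/* an example host inode number */
	uint64_t idx = h_ino / maxent;		/* which xino file */
	uint64_t pos = (h_ino % maxent) * sz;	/* byte offset inside it */

	printf("h_ino %" PRIu64 " -> idx %" PRIu64 ", pos %" PRIu64 "\n",
	       h_ino, idx, pos);		/* idx 2, pos 40 */
	return 0;
}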
+
+static int au_xino_do_new_async(struct super_block *sb, struct au_branch *br,
+ struct au_xi_calc *calc)
+{
+ int err;
+ struct file *file;
+ struct au_xino *xi = br->br_xino;
+ struct au_xi_new xinew = {
+ .xi = xi
+ };
+
+ SiMustAnyLock(sb);
+
+ err = 0;
+ if (!xi)
+ goto out;
+
+ mutex_lock(&xi->xi_mtx);
+ file = au_xino_file(xi, calc->idx);
+ if (file)
+ goto out_mtx;
+
+ file = au_xino_file(xi, /*idx*/-1);
+ AuDebugOn(!file);
+ xinew.idx = calc->idx;
+ xinew.base = &file->f_path;
+ /* xinew.copy_src = NULL; */
+ file = au_xi_new(sb, &xinew);
+ if (IS_ERR(file))
+ err = PTR_ERR(file);
+
+out_mtx:
+ mutex_unlock(&xi->xi_mtx);
+out:
+ return err;
+}
+
+struct au_xino_do_new_async_args {
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_xi_calc calc;
+ ino_t ino;
+};
+
+struct au_xi_writing {
+ struct hlist_bl_node node;
+ ino_t h_ino, ino;
+};
+
+static int au_xino_do_write(vfs_writef_t write, struct file *file,
+ struct au_xi_calc *calc, ino_t ino);
+
+static void au_xino_call_do_new_async(void *args)
+{
+ struct au_xino_do_new_async_args *a = args;
+ struct au_branch *br;
+ struct super_block *sb;
+ struct au_sbinfo *sbi;
+ struct inode *root;
+ struct file *file;
+ struct au_xi_writing *del, *p;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ int err;
+
+ br = a->br;
+ sb = a->sb;
+ sbi = au_sbi(sb);
+ si_noflush_read_lock(sb);
+ root = d_inode(sb->s_root);
+ ii_read_lock_child(root);
+ err = au_xino_do_new_async(sb, br, &a->calc);
+ if (unlikely(err)) {
+ AuIOErr("err %d\n", err);
+ goto out;
+ }
+
+ file = au_xino_file(br->br_xino, a->calc.idx);
+ AuDebugOn(!file);
+ err = au_xino_do_write(sbi->si_xwrite, file, &a->calc, a->ino);
+ if (unlikely(err)) {
+ AuIOErr("err %d\n", err);
+ goto out;
+ }
+
+ del = NULL;
+ hbl = &br->br_xino->xi_writing;
+ hlist_bl_lock(hbl);
+ au_hbl_for_each(pos, hbl) {
+ p = container_of(pos, struct au_xi_writing, node);
+ if (p->ino == a->ino) {
+ del = p;
+ hlist_bl_del(&p->node);
+ break;
+ }
+ }
+ hlist_bl_unlock(hbl);
+ au_kfree_rcu(del);
+
+out:
+ au_lcnt_dec(&br->br_count);
+ ii_read_unlock(root);
+ si_read_unlock(sb);
+ au_nwt_done(&sbi->si_nowait);
+ au_kfree_rcu(a);
+}
+
+/*
+ * create a new xino file asynchronously
+ */
+static int au_xino_new_async(struct super_block *sb, struct au_branch *br,
+ struct au_xi_calc *calc, ino_t ino)
+{
+ int err;
+ struct au_xino_do_new_async_args *arg;
+
+ err = -ENOMEM;
+ arg = kmalloc(sizeof(*arg), GFP_NOFS);
+ if (unlikely(!arg))
+ goto out;
+
+ arg->sb = sb;
+ arg->br = br;
+ arg->calc = *calc;
+ arg->ino = ino;
+ au_lcnt_inc(&br->br_count);
+ err = au_wkq_nowait(au_xino_call_do_new_async, arg, sb, AuWkq_NEST);
+ if (unlikely(err)) {
+ pr_err("wkq %d\n", err);
+ au_lcnt_dec(&br->br_count);
+ au_kfree_rcu(arg);
+ }
+
+out:
+ return err;
+}
+
+/*
+ * read @ino from xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ */
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t *ino)
+{
+ int err;
+ ssize_t sz;
+ struct au_xi_calc calc;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+ struct au_xino *xi;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos;
+ struct au_xi_writing *p;
+
+ *ino = 0;
+ if (!au_opt_test(au_mntflags(sb), XINO))
+ return 0; /* no xino */
+
+ err = 0;
+ au_xi_calc(sb, h_ino, &calc);
+ xi = au_sbr(sb, bindex)->br_xino;
+ file = au_xino_file(xi, calc.idx);
+ if (!file) {
+ hbl = &xi->xi_writing;
+ hlist_bl_lock(hbl);
+ au_hbl_for_each(pos, hbl) {
+ p = container_of(pos, struct au_xi_writing, node);
+ if (p->h_ino == h_ino) {
+ AuDbg("hi%llu, i%llu, found\n",
+ (u64)p->h_ino, (u64)p->ino);
+ *ino = p->ino;
+ break;
+ }
+ }
+ hlist_bl_unlock(hbl);
+ return 0;
+ } else if (vfsub_f_size_read(file) < calc.pos + sizeof(*ino))
+ return 0; /* no xino */
+
+ sbinfo = au_sbi(sb);
+ sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &calc.pos);
+ if (sz == sizeof(*ino))
+ return 0; /* success */
+
+ err = sz;
+ if (unlikely(sz >= 0)) {
+ err = -EIO;
+ AuIOErr("xino read error (%zd)\n", sz);
+ }
+ return err;
+}
+
+static int au_xino_do_write(vfs_writef_t write, struct file *file,
+ struct au_xi_calc *calc, ino_t ino)
+{
+ ssize_t sz;
+
+ sz = xino_fwrite(write, file, &ino, sizeof(ino), &calc->pos);
+ if (sz == sizeof(ino))
+ return 0; /* success */
+
+ AuIOErr("write failed (%zd)\n", sz);
+ return -EIO;
+}
+
+/*
+ * write @ino to the xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * even if @ino is zero, it is written to the xinofile and means no entry.
+ * if the size of the xino file on a specific filesystem exceeds the watermark,
+ * try truncating it.
+ */
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t ino)
+{
+ int err;
+ unsigned int mnt_flags;
+ struct au_xi_calc calc;
+ struct file *file;
+ struct au_branch *br;
+ struct au_xino *xi;
+ struct au_xi_writing *p;
+
+ SiMustAnyLock(sb);
+
+ mnt_flags = au_mntflags(sb);
+ if (!au_opt_test(mnt_flags, XINO))
+ return 0;
+
+ au_xi_calc(sb, h_ino, &calc);
+ br = au_sbr(sb, bindex);
+ xi = br->br_xino;
+ file = au_xino_file(xi, calc.idx);
+ if (!file) {
+ /* store the inum pair into the list */
+ p = kmalloc(sizeof(*p), GFP_NOFS | __GFP_NOFAIL);
+ p->h_ino = h_ino;
+ p->ino = ino;
+ au_hbl_add(&p->node, &xi->xi_writing);
+
+ /* create and write a new xino file asynchronously */
+ err = au_xino_new_async(sb, br, &calc, ino);
+ if (!err)
+ return 0; /* success */
+ goto out;
+ }
+
+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, file, &calc, ino);
+ if (!err) {
+ br = au_sbr(sb, bindex);
+ if (au_opt_test(mnt_flags, TRUNC_XINO)
+ && au_test_fs_trunc_xino(au_br_sb(br)))
+ xino_try_trunc(sb, br);
+ return 0; /* success */
+ }
+
+out:
+ AuIOErr("write failed (%d)\n", err);
+ return -EIO;
+}
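
When the xino file for calc.idx does not exist yet, au_xino_write() parks the (h_ino, ino) pair on xi_writing and lets a workqueue create the file and persist the entry; until that finishes, au_xino_read() answers lookups for the same h_ino from that list (see the hlist walk earlier in this file). The sketch below compresses the two possible sources of an answer into plain single-threaded C with invented container sizes, only to illustrate the lookup order, not the real data structures:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct pending { uint64_t h_ino, ino; };	/* stand-in for au_xi_writing */

static uint64_t persisted[16];			/* stand-in for the xino file */
static struct pending in_flight[4];		/* pairs not written out yet */

static uint64_t lookup(uint64_t h_ino)
{
	if (h_ino < 16 && persisted[h_ino])
		return persisted[h_ino];	/* already on disk */
	for (int i = 0; i < 4; i++)
		if (in_flight[i].h_ino == h_ino)
			return in_flight[i].ino;	/* still being written */
	return 0;				/* zero means "no entry" */
}

int main(void)
{
	in_flight[0] = (struct pending){ .h_ino = 7, .ino = 42 };
	printf("ino for h_ino 7: %" PRIu64 "\n", lookup(7));
	return 0;
}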
+
+static ssize_t xino_fread_wkq(vfs_readf_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos);
+
+/* todo: unnecessary to support mmap_sem since kernel-space? */
+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *kbuf, size_t size,
+ loff_t *pos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ char __user *u;
+ } buf;
+ int i;
+ const int prevent_endless = 10;
+
+ i = 0;
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ do {
+ err = func(file, buf.u, size, pos);
+ if (err == -EINTR
+ && !au_wkq_test()
+ && fatal_signal_pending(current)) {
+ set_fs(oldfs);
+ err = xino_fread_wkq(func, file, kbuf, size, pos);
+ BUG_ON(err == -EINTR);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ }
+ } while (i++ < prevent_endless
+ && (err == -EAGAIN || err == -EINTR));
+ set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+ if (err > 0)
+ fsnotify_access(file->f_path.dentry);
+#endif
+
+ return err;
+}
+
+struct xino_fread_args {
+ ssize_t *errp;
+ vfs_readf_t func;
+ struct file *file;
+ void *buf;
+ size_t size;
+ loff_t *pos;
+};
+
+static void call_xino_fread(void *args)
+{
+ struct xino_fread_args *a = args;
+ *a->errp = xino_fread(a->func, a->file, a->buf, a->size, a->pos);
+}
+
+static ssize_t xino_fread_wkq(vfs_readf_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos)
+{
+ ssize_t err;
+ int wkq_err;
+ struct xino_fread_args args = {
+ .errp = &err,
+ .func = func,
+ .file = file,
+ .buf = buf,
+ .size = size,
+ .pos = pos
+ };
+
+ wkq_err = au_wkq_wait(call_xino_fread, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+
+ return err;
+}
+
+static ssize_t xino_fwrite_wkq(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos);
+
+static ssize_t do_xino_fwrite(vfs_writef_t func, struct file *file, void *kbuf,
+ size_t size, loff_t *pos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ const char __user *u;
+ } buf;
+ int i;
+ const int prevent_endless = 10;
+
+ i = 0;
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ do {
+ err = func(file, buf.u, size, pos);
+ if (err == -EINTR
+ && !au_wkq_test()
+ && fatal_signal_pending(current)) {
+ set_fs(oldfs);
+ err = xino_fwrite_wkq(func, file, kbuf, size, pos);
+ BUG_ON(err == -EINTR);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ }
+ } while (i++ < prevent_endless
+ && (err == -EAGAIN || err == -EINTR));
+ set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+ if (err > 0)
+ fsnotify_modify(file->f_path.dentry);
+#endif
+
+ return err;
+}
+
+struct do_xino_fwrite_args {
+ ssize_t *errp;
+ vfs_writef_t func;
+ struct file *file;
+ void *buf;
+ size_t size;
+ loff_t *pos;
+};
+
+static void call_do_xino_fwrite(void *args)
+{
+ struct do_xino_fwrite_args *a = args;
+ *a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos);
+}
+
+static ssize_t xino_fwrite_wkq(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos)
+{
+ ssize_t err;
+ int wkq_err;
+ struct do_xino_fwrite_args args = {
+ .errp = &err,
+ .func = func,
+ .file = file,
+ .buf = buf,
+ .size = size,
+ .pos = pos
+ };
+
+ /*
+ * the workqueue thread is not bound by RLIMIT_FSIZE or the caller's other
+ * limits; users should still care about quota and a really full filesystem.
+ */
+ wkq_err = au_wkq_wait(call_do_xino_fwrite, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+
+ return err;
+}
+
+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos)
+{
+ ssize_t err;
+
+ if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) {
+ lockdep_off();
+ err = do_xino_fwrite(func, file, buf, size, pos);
+ lockdep_on();
+ } else {
+ lockdep_off();
+ err = xino_fwrite_wkq(func, file, buf, size, pos);
+ lockdep_on();
+ }
+
+ return err;
+}
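
xino_fwrite() takes the direct path only when the caller has no file-size limit; otherwise the write is handed to a workqueue thread, which is not bound by the caller's RLIMIT_FSIZE (hence the note above about quota and a really full filesystem). The condition being tested looks like this in user space, shown purely as an illustration:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_FSIZE, &rl)) {
		perror("getrlimit");
		return 1;
	}
	if (rl.rlim_cur == RLIM_INFINITY)
		printf("no file-size limit: write directly\n");
	else
		printf("limit is %llu bytes: hand the write to a helper\n",
		       (unsigned long long)rl.rlim_cur);
	return 0;
}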
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * inode number bitmap
+ */
+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE;
+static ino_t xib_calc_ino(unsigned long pindex, int bit)
+{
+ ino_t ino;
+
+ AuDebugOn(bit < 0 || page_bits <= bit);
+ ino = AUFS_FIRST_INO + pindex * page_bits + bit;
+ return ino;
+}
+
+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit)
+{
+ AuDebugOn(ino < AUFS_FIRST_INO);
+ ino -= AUFS_FIRST_INO;
+ *pindex = ino / page_bits;
+ *bit = ino % page_bits;
+}
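
xib_calc_ino() and xib_calc_bit() map an aufs inode number to a (page index, bit) pair and back; each bitmap page covers PAGE_SIZE * 8 inode numbers, offset by AUFS_FIRST_INO. A small round-trip check of that arithmetic, assuming a 4 KiB page and using 11 merely as a stand-in for AUFS_FIRST_INO:

#include <assert.h>
#include <stdio.h>

#define PAGE_SZ		4096UL
#define PAGE_BITS	(PAGE_SZ * 8)
#define FIRST_INO	11UL		/* stand-in for AUFS_FIRST_INO */

static unsigned long calc_ino(unsigned long pindex, int bit)
{
	return FIRST_INO + pindex * PAGE_BITS + bit;
}

static void calc_bit(unsigned long ino, unsigned long *pindex, int *bit)
{
	ino -= FIRST_INO;
	*pindex = ino / PAGE_BITS;
	*bit = (int)(ino % PAGE_BITS);
}

int main(void)
{
	unsigned long pindex;
	int bit;

	calc_bit(calc_ino(3, 1000), &pindex, &bit);
	assert(pindex == 3 && bit == 1000);
	printf("page %lu, bit %d\n", pindex, bit);
	return 0;
}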
+
+static int xib_pindex(struct super_block *sb, unsigned long pindex)
+{
+ int err;
+ loff_t pos;
+ ssize_t sz;
+ struct au_sbinfo *sbinfo;
+ struct file *xib;
+ unsigned long *p;
+
+ sbinfo = au_sbi(sb);
+ MtxMustLock(&sbinfo->si_xib_mtx);
+ AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE
+ || !au_opt_test(sbinfo->si_mntflags, XINO));
+
+ if (pindex == sbinfo->si_xib_last_pindex)
+ return 0;
+
+ xib = sbinfo->si_xib;
+ p = sbinfo->si_xib_buf;
+ pos = sbinfo->si_xib_last_pindex;
+ pos *= PAGE_SIZE;
+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+ if (unlikely(sz != PAGE_SIZE))
+ goto out;
+
+ pos = pindex;
+ pos *= PAGE_SIZE;
+ if (vfsub_f_size_read(xib) >= pos + PAGE_SIZE)
+ sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos);
+ else {
+ memset(p, 0, PAGE_SIZE);
+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+ }
+ if (sz == PAGE_SIZE) {
+ sbinfo->si_xib_last_pindex = pindex;
+ return 0; /* success */
+ }
+
+out:
+ AuIOErr1("write failed (%zd)\n", sz);
+ err = sz;
+ if (sz >= 0)
+ err = -EIO;
+ return err;
+}
+
+static void au_xib_clear_bit(struct inode *inode)
+{
+ int err, bit;
+ unsigned long pindex;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+ AuDebugOn(inode->i_nlink);
+
+ sb = inode->i_sb;
+ xib_calc_bit(inode->i_ino, &pindex, &bit);
+ AuDebugOn(page_bits <= bit);
+ sbinfo = au_sbi(sb);
+ mutex_lock(&sbinfo->si_xib_mtx);
+ err = xib_pindex(sb, pindex);
+ if (!err) {
+ clear_bit(bit, sbinfo->si_xib_buf);
+ sbinfo->si_xib_next_bit = bit;
+ }
+ mutex_unlock(&sbinfo->si_xib_mtx);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * truncate a xino bitmap file
+ */
+
+/* todo: slow */
+static int do_xib_restore(struct super_block *sb, struct file *file, void *page)
+{
+ int err, bit;
+ ssize_t sz;
+ unsigned long pindex;
+ loff_t pos, pend;
+ struct au_sbinfo *sbinfo;
+ vfs_readf_t func;
+ ino_t *ino;
+ unsigned long *p;
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ MtxMustLock(&sbinfo->si_xib_mtx);
+ p = sbinfo->si_xib_buf;
+ func = sbinfo->si_xread;
+ pend = vfsub_f_size_read(file);
+ pos = 0;
+ while (pos < pend) {
+ sz = xino_fread(func, file, page, PAGE_SIZE, &pos);
+ err = sz;
+ if (unlikely(sz <= 0))
+ goto out;
+
+ err = 0;
+ for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) {
+ if (unlikely(*ino < AUFS_FIRST_INO))
+ continue;
+
+ xib_calc_bit(*ino, &pindex, &bit);
+ AuDebugOn(page_bits <= bit);
+ err = xib_pindex(sb, pindex);
+ if (!err)
+ set_bit(bit, p);
+ else
+ goto out;
+ }
+ }
+
+out:
+ return err;
+}
+
+static int xib_restore(struct super_block *sb)
+{
+ int err, i;
+ unsigned int nfile;
+ aufs_bindex_t bindex, bbot;
+ void *page;
+ struct au_branch *br;
+ struct au_xino *xi;
+ struct file *file;
+
+ err = -ENOMEM;
+ page = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!page))
+ goto out;
+
+ err = 0;
+ bbot = au_sbbot(sb);
+ for (bindex = 0; !err && bindex <= bbot; bindex++)
+ if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0) {
+ br = au_sbr(sb, bindex);
+ xi = br->br_xino;
+ nfile = xi->xi_nfile;
+ for (i = 0; i < nfile; i++) {
+ file = au_xino_file(xi, i);
+ if (file)
+ err = do_xib_restore(sb, file, page);
+ }
+ } else
+ AuDbg("skip shared b%d\n", bindex);
+ free_page((unsigned long)page);
+
+out:
+ return err;
+}
+
+int au_xib_trunc(struct super_block *sb)
+{
+ int err;
+ ssize_t sz;
+ loff_t pos;
+ struct au_sbinfo *sbinfo;
+ unsigned long *p;
+ struct file *file;
+
+ SiMustWriteLock(sb);
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ if (!au_opt_test(sbinfo->si_mntflags, XINO))
+ goto out;
+
+ file = sbinfo->si_xib;
+ if (vfsub_f_size_read(file) <= PAGE_SIZE)
+ goto out;
+
+ file = au_xino_create2(sb, &sbinfo->si_xib->f_path, NULL);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = file;
+
+ p = sbinfo->si_xib_buf;
+ memset(p, 0, PAGE_SIZE);
+ pos = 0;
+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos);
+ if (unlikely(sz != PAGE_SIZE)) {
+ err = sz;
+ AuIOErr("err %d\n", err);
+ if (sz >= 0)
+ err = -EIO;
+ goto out;
+ }
+
+ mutex_lock(&sbinfo->si_xib_mtx);
+ /* mnt_want_write() is unnecessary here */
+ err = xib_restore(sb);
+ mutex_unlock(&sbinfo->si_xib_mtx);
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_xino *au_xino_alloc(unsigned int nfile)
+{
+ struct au_xino *xi;
+
+ xi = kzalloc(sizeof(*xi), GFP_NOFS);
+ if (unlikely(!xi))
+ goto out;
+ xi->xi_nfile = nfile;
+ xi->xi_file = kcalloc(nfile, sizeof(*xi->xi_file), GFP_NOFS);
+ if (unlikely(!xi->xi_file))
+ goto out_free;
+
+ xi->xi_nondir.total = 8; /* initial size */
+ xi->xi_nondir.array = kcalloc(xi->xi_nondir.total, sizeof(ino_t),
+ GFP_NOFS);
+ if (unlikely(!xi->xi_nondir.array))
+ goto out_file;
+
+ spin_lock_init(&xi->xi_nondir.spin);
+ init_waitqueue_head(&xi->xi_nondir.wqh);
+ mutex_init(&xi->xi_mtx);
+ INIT_HLIST_BL_HEAD(&xi->xi_writing);
+ atomic_set(&xi->xi_truncating, 0);
+ kref_init(&xi->xi_kref);
+ goto out; /* success */
+
+out_file:
+ au_kfree_try_rcu(xi->xi_file);
+out_free:
+ au_kfree_rcu(xi);
+ xi = NULL;
+out:
+ return xi;
+}
+
+static int au_xino_init(struct au_branch *br, int idx, struct file *file)
+{
+ int err;
+ struct au_xino *xi;
+
+ err = 0;
+ xi = au_xino_alloc(idx + 1);
+ if (unlikely(!xi)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (file)
+ get_file(file);
+ xi->xi_file[idx] = file;
+ AuDebugOn(br->br_xino);
+ br->br_xino = xi;
+
+out:
+ return err;
+}
+
+static void au_xino_release(struct kref *kref)
+{
+ struct au_xino *xi;
+ int i;
+ unsigned long ul;
+ struct hlist_bl_head *hbl;
+ struct hlist_bl_node *pos, *n;
+ struct au_xi_writing *p;
+
+ xi = container_of(kref, struct au_xino, xi_kref);
+ for (i = 0; i < xi->xi_nfile; i++)
+ if (xi->xi_file[i])
+ fput(xi->xi_file[i]);
+ for (i = xi->xi_nondir.total - 1; i >= 0; i--)
+ AuDebugOn(xi->xi_nondir.array[i]);
+ mutex_destroy(&xi->xi_mtx);
+ hbl = &xi->xi_writing;
+ ul = au_hbl_count(hbl);
+ if (unlikely(ul)) {
+ pr_warn("xi_writing %lu\n", ul);
+ hlist_bl_lock(hbl);
+ hlist_bl_for_each_entry_safe (p, pos, n, hbl, node) {
+ hlist_bl_del(&p->node);
+ au_kfree_rcu(p);
+ }
+ hlist_bl_unlock(hbl);
+ }
+ au_kfree_try_rcu(xi->xi_file);
+ au_kfree_try_rcu(xi->xi_nondir.array);
+ au_kfree_rcu(xi);
+}
+
+int au_xino_put(struct au_branch *br)
+{
+ int ret;
+ struct au_xino *xi;
+
+ ret = 0;
+ xi = br->br_xino;
+ if (xi) {
+ br->br_xino = NULL;
+ ret = kref_put(&xi->xi_kref, au_xino_release);
+ }
+
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * xino mount option handlers
+ */
+
+/* xino bitmap */
+static void xino_clear_xib(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ /* unnecessary to clear sbinfo->si_xread and ->si_xwrite */
+ if (sbinfo->si_xib)
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = NULL;
+ if (sbinfo->si_xib_buf)
+ free_page((unsigned long)sbinfo->si_xib_buf);
+ sbinfo->si_xib_buf = NULL;
+}
+
+static int au_xino_set_xib(struct super_block *sb, struct path *path)
+{
+ int err;
+ loff_t pos;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+ struct super_block *xi_sb;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ file = au_xino_create2(sb, path, sbinfo->si_xib);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ if (sbinfo->si_xib)
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = file;
+ sbinfo->si_xread = vfs_readf(file);
+ sbinfo->si_xwrite = vfs_writef(file);
+ xi_sb = file_inode(file)->i_sb;
+ sbinfo->si_ximaxent = xi_sb->s_maxbytes;
+ if (unlikely(sbinfo->si_ximaxent < PAGE_SIZE)) {
+ err = -EIO;
+ pr_err("s_maxbytes(%llu) on %s is too small\n",
+ (u64)sbinfo->si_ximaxent, au_sbtype(xi_sb));
+ goto out_unset;
+ }
+ sbinfo->si_ximaxent /= sizeof(ino_t);
+
+ err = -ENOMEM;
+ if (!sbinfo->si_xib_buf)
+ sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS);
+ if (unlikely(!sbinfo->si_xib_buf))
+ goto out_unset;
+
+ sbinfo->si_xib_last_pindex = 0;
+ sbinfo->si_xib_next_bit = 0;
+ if (vfsub_f_size_read(file) < PAGE_SIZE) {
+ pos = 0;
+ err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf,
+ PAGE_SIZE, &pos);
+ if (unlikely(err != PAGE_SIZE))
+ goto out_free;
+ }
+ err = 0;
+ goto out; /* success */
+
+out_free:
+ if (sbinfo->si_xib_buf)
+ free_page((unsigned long)sbinfo->si_xib_buf);
+ sbinfo->si_xib_buf = NULL;
+ if (err >= 0)
+ err = -EIO;
+out_unset:
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = NULL;
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* xino for each branch */
+static void xino_clear_br(struct super_block *sb)
+{
+ aufs_bindex_t bindex, bbot;
+ struct au_branch *br;
+
+ bbot = au_sbbot(sb);
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ AuDebugOn(!br);
+ au_xino_put(br);
+ }
+}
+
+static void au_xino_set_br_shared(struct super_block *sb, struct au_branch *br,
+ aufs_bindex_t bshared)
+{
+ struct au_branch *brshared;
+
+ brshared = au_sbr(sb, bshared);
+ AuDebugOn(!brshared->br_xino);
+ AuDebugOn(!brshared->br_xino->xi_file);
+ if (br->br_xino != brshared->br_xino) {
+ au_xino_get(brshared);
+ au_xino_put(br);
+ br->br_xino = brshared->br_xino;
+ }
+}
+
+struct au_xino_do_set_br {
+ vfs_writef_t writef;
+ struct au_branch *br;
+ ino_t h_ino;
+ aufs_bindex_t bshared;
+};
+
+static int au_xino_do_set_br(struct super_block *sb, struct path *path,
+ struct au_xino_do_set_br *args)
+{
+ int err;
+ struct au_xi_calc calc;
+ struct file *file;
+ struct au_branch *br;
+ struct au_xi_new xinew = {
+ .base = path
+ };
+
+ br = args->br;
+ xinew.xi = br->br_xino;
+ au_xi_calc(sb, args->h_ino, &calc);
+ xinew.copy_src = au_xino_file(xinew.xi, calc.idx);
+ if (args->bshared >= 0)
+ /* shared xino */
+ au_xino_set_br_shared(sb, br, args->bshared);
+ else if (!xinew.xi) {
+ /* new xino */
+ err = au_xino_init(br, calc.idx, xinew.copy_src);
+ if (unlikely(err))
+ goto out;
+ }
+
+ /* force re-creating */
+ xinew.xi = br->br_xino;
+ xinew.idx = calc.idx;
+ mutex_lock(&xinew.xi->xi_mtx);
+ file = au_xi_new(sb, &xinew);
+ mutex_unlock(&xinew.xi->xi_mtx);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ AuDebugOn(!file);
+
+ err = au_xino_do_write(args->writef, file, &calc, AUFS_ROOT_INO);
+ if (unlikely(err))
+ au_xino_put(br);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_xino_set_br(struct super_block *sb, struct path *path)
+{
+ int err;
+ aufs_bindex_t bindex, bbot;
+ struct au_xino_do_set_br args;
+ struct inode *inode;
+
+ SiMustWriteLock(sb);
+
+ bbot = au_sbbot(sb);
+ inode = d_inode(sb->s_root);
+ args.writef = au_sbi(sb)->si_xwrite;
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ args.h_ino = au_h_iptr(inode, bindex)->i_ino;
+ args.br = au_sbr(sb, bindex);
+ args.bshared = is_sb_shared(sb, bindex, bindex - 1);
+ err = au_xino_do_set_br(sb, path, &args);
+ if (unlikely(err))
+ break;
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+void au_xino_clr(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ au_xigen_clr(sb);
+ xino_clear_xib(sb);
+ xino_clear_br(sb);
+ dbgaufs_brs_del(sb, 0);
+ sbinfo = au_sbi(sb);
+ /* lvalue, do not call au_mntflags() */
+ au_opt_clr(sbinfo->si_mntflags, XINO);
+}
+
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xiopt, int remount)
+{
+ int err, skip;
+ struct dentry *dentry, *parent, *cur_dentry, *cur_parent;
+ struct qstr *dname, *cur_name;
+ struct file *cur_xino;
+ struct au_sbinfo *sbinfo;
+ struct path *path, *cur_path;
+
+ SiMustWriteLock(sb);
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ path = &xiopt->file->f_path;
+ dentry = path->dentry;
+ parent = dget_parent(dentry);
+ if (remount) {
+ skip = 0;
+ cur_xino = sbinfo->si_xib;
+ if (cur_xino) {
+ cur_path = &cur_xino->f_path;
+ cur_dentry = cur_path->dentry;
+ cur_parent = dget_parent(cur_dentry);
+ cur_name = &cur_dentry->d_name;
+ dname = &dentry->d_name;
+ skip = (cur_parent == parent
+ && au_qstreq(dname, cur_name));
+ dput(cur_parent);
+ }
+ if (skip)
+ goto out;
+ }
+
+ au_opt_set(sbinfo->si_mntflags, XINO);
+ err = au_xino_set_xib(sb, path);
+ /* si_x{read,write} are set */
+ if (!err)
+ err = au_xigen_set(sb, path);
+ if (!err)
+ err = au_xino_set_br(sb, path);
+ if (!err) {
+ dbgaufs_brs_add(sb, 0, /*topdown*/1);
+ goto out; /* success */
+ }
+
+ /* reset all */
+ AuIOErr("failed setting xino(%d).\n", err);
+ au_xino_clr(sb);
+
+out:
+ dput(parent);
+ return err;
+}
+
+/*
+ * create a xinofile at the default place/path.
+ */
+struct file *au_xino_def(struct super_block *sb)
+{
+ struct file *file;
+ char *page, *p;
+ struct au_branch *br;
+ struct super_block *h_sb;
+ struct path path;
+ aufs_bindex_t bbot, bindex, bwr;
+
+ br = NULL;
+ bbot = au_sbbot(sb);
+ bwr = -1;
+ for (bindex = 0; bindex <= bbot; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_writable(br->br_perm)
+ && !au_test_fs_bad_xino(au_br_sb(br))) {
+ bwr = bindex;
+ break;
+ }
+ }
+
+ if (bwr >= 0) {
+ file = ERR_PTR(-ENOMEM);
+ page = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!page))
+ goto out;
+ path.mnt = au_br_mnt(br);
+ path.dentry = au_h_dptr(sb->s_root, bwr);
+ p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME));
+ file = (void *)p;
+ if (!IS_ERR(p)) {
+ strcat(p, "/" AUFS_XINO_FNAME);
+ AuDbg("%s\n", p);
+ file = au_xino_create(sb, p, /*silent*/0);
+ }
+ free_page((unsigned long)page);
+ } else {
+ file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0);
+ if (IS_ERR(file))
+ goto out;
+ h_sb = file->f_path.dentry->d_sb;
+ if (unlikely(au_test_fs_bad_xino(h_sb))) {
+ pr_err("xino doesn't support %s(%s)\n",
+ AUFS_XINO_DEFPATH, au_sbtype(h_sb));
+ fput(file);
+ file = ERR_PTR(-EINVAL);
+ }
+ }
+
+out:
+ return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * initialize the xinofile for the specified branch @br
+ * at the place/path that @base indicates.
+ * if another branch is already on the same filesystem, share its xinofile
+ * instead of creating a new one.
+ */
+int au_xino_init_br(struct super_block *sb, struct au_branch *br, ino_t h_ino,
+ struct path *base)
+{
+ int err;
+ struct au_xino_do_set_br args = {
+ .h_ino = h_ino,
+ .br = br
+ };
+
+ args.writef = au_sbi(sb)->si_xwrite;
+ args.bshared = sbr_find_shared(sb, /*btop*/0, au_sbbot(sb),
+ au_br_sb(br));
+ err = au_xino_do_set_br(sb, base, &args);
+ if (unlikely(err))
+ au_xino_put(br);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * get an unused inode number from the bitmap
+ */
+ino_t au_xino_new_ino(struct super_block *sb)
+{
+ ino_t ino;
+ unsigned long *p, pindex, ul, pend;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+ int free_bit, err;
+
+ if (!au_opt_test(au_mntflags(sb), XINO))
+ return iunique(sb, AUFS_FIRST_INO);
+
+ sbinfo = au_sbi(sb);
+ mutex_lock(&sbinfo->si_xib_mtx);
+ p = sbinfo->si_xib_buf;
+ free_bit = sbinfo->si_xib_next_bit;
+ if (free_bit < page_bits && !test_bit(free_bit, p))
+ goto out; /* success */
+ free_bit = find_first_zero_bit(p, page_bits);
+ if (free_bit < page_bits)
+ goto out; /* success */
+
+ pindex = sbinfo->si_xib_last_pindex;
+ for (ul = pindex - 1; ul < ULONG_MAX; ul--) {
+ err = xib_pindex(sb, ul);
+ if (unlikely(err))
+ goto out_err;
+ free_bit = find_first_zero_bit(p, page_bits);
+ if (free_bit < page_bits)
+ goto out; /* success */
+ }
+
+ file = sbinfo->si_xib;
+ pend = vfsub_f_size_read(file) / PAGE_SIZE;
+ for (ul = pindex + 1; ul <= pend; ul++) {
+ err = xib_pindex(sb, ul);
+ if (unlikely(err))
+ goto out_err;
+ free_bit = find_first_zero_bit(p, page_bits);
+ if (free_bit < page_bits)
+ goto out; /* success */
+ }
+ BUG();
+
+out:
+ set_bit(free_bit, p);
+ sbinfo->si_xib_next_bit = free_bit + 1;
+ pindex = sbinfo->si_xib_last_pindex;
+ mutex_unlock(&sbinfo->si_xib_mtx);
+ ino = xib_calc_ino(pindex, free_bit);
+ AuDbg("i%lu\n", (unsigned long)ino);
+ return ino;
+out_err:
+ mutex_unlock(&sbinfo->si_xib_mtx);
+ AuDbg("i0\n");
+ return 0;
+}
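
au_xino_new_ino() keeps one bitmap page in memory, tries the cached next-bit hint, then scans the page with find_first_zero_bit(); when the page is full it pages in the lower page indices and then the higher ones via xib_pindex() and repeats the scan. A naive user-space stand-in for the per-page scan, only to show what 'first zero bit' means here:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

/* naive stand-in for the kernel's find_first_zero_bit() */
static unsigned long first_zero_bit(const unsigned long *map,
				    unsigned long nbits)
{
	for (unsigned long i = 0; i < nbits; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return nbits;			/* no free bit in this window */
}

int main(void)
{
	unsigned long map[2] = { ~0UL, ~0UL };

	map[1] &= ~(1UL << 5);		/* leave a single hole */
	printf("first free bit: %lu\n",
	       first_zero_bit(map, 2 * BITS_PER_LONG));
	return 0;
}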
+
+/* for s_op->delete_inode() */
+void au_xino_delete_inode(struct inode *inode, const int unlinked)
+{
+ int err;
+ unsigned int mnt_flags;
+ aufs_bindex_t bindex, bbot, bi;
+ unsigned char try_trunc;
+ struct au_iinfo *iinfo;
+ struct super_block *sb;
+ struct au_hinode *hi;
+ struct inode *h_inode;
+ struct au_branch *br;
+ vfs_writef_t xwrite;
+ struct au_xi_calc calc;
+ struct file *file;
+
+ AuDebugOn(au_is_bad_inode(inode));
+
+ sb = inode->i_sb;
+ mnt_flags = au_mntflags(sb);
+ if (!au_opt_test(mnt_flags, XINO)
+ || inode->i_ino == AUFS_ROOT_INO)
+ return;
+
+ if (unlinked) {
+ au_xigen_inc(inode);
+ au_xib_clear_bit(inode);
+ }
+
+ iinfo = au_ii(inode);
+ bindex = iinfo->ii_btop;
+ if (bindex < 0)
+ return;
+
+ xwrite = au_sbi(sb)->si_xwrite;
+ try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO);
+ hi = au_hinode(iinfo, bindex);
+ bbot = iinfo->ii_bbot;
+ for (; bindex <= bbot; bindex++, hi++) {
+ h_inode = hi->hi_inode;
+ if (!h_inode
+ || (!unlinked && h_inode->i_nlink))
+ continue;
+
+ /* inode may not be revalidated */
+ bi = au_br_index(sb, hi->hi_id);
+ if (bi < 0)
+ continue;
+
+ br = au_sbr(sb, bi);
+ au_xi_calc(sb, h_inode->i_ino, &calc);
+ file = au_xino_file(br->br_xino, calc.idx);
+ if (IS_ERR_OR_NULL(file))
+ continue;
+
+ err = au_xino_do_write(xwrite, file, &calc, /*ino*/0);
+ if (!err && try_trunc
+ && au_test_fs_trunc_xino(au_br_sb(br)))
+ xino_try_trunc(sb, br);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_xinondir_find(struct au_xino *xi, ino_t h_ino)
+{
+ int found, total, i;
+
+ found = -1;
+ total = xi->xi_nondir.total;
+ for (i = 0; i < total; i++) {
+ if (xi->xi_nondir.array[i] != h_ino)
+ continue;
+ found = i;
+ break;
+ }
+
+ return found;
+}
+
+static int au_xinondir_expand(struct au_xino *xi)
+{
+ int err, sz;
+ ino_t *p;
+
+ BUILD_BUG_ON(KMALLOC_MAX_SIZE > INT_MAX);
+
+ err = -ENOMEM;
+ sz = xi->xi_nondir.total * sizeof(ino_t);
+ if (unlikely(sz > KMALLOC_MAX_SIZE / 2))
+ goto out;
+ p = au_kzrealloc(xi->xi_nondir.array, sz, sz << 1, GFP_ATOMIC,
+ /*may_shrink*/0);
+ if (p) {
+ xi->xi_nondir.array = p;
+ xi->xi_nondir.total <<= 1;
+ AuDbg("xi_nondir.total %d\n", xi->xi_nondir.total);
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+void au_xinondir_leave(struct super_block *sb, aufs_bindex_t bindex,
+ ino_t h_ino, int idx)
+{
+ struct au_xino *xi;
+
+ AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
+ xi = au_sbr(sb, bindex)->br_xino;
+ AuDebugOn(idx < 0 || xi->xi_nondir.total <= idx);
+
+ spin_lock(&xi->xi_nondir.spin);
+ AuDebugOn(xi->xi_nondir.array[idx] != h_ino);
+ xi->xi_nondir.array[idx] = 0;
+ spin_unlock(&xi->xi_nondir.spin);
+ wake_up_all(&xi->xi_nondir.wqh);
+}
+
+int au_xinondir_enter(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ int *idx)
+{
+ int err, found, empty;
+ struct au_xino *xi;
+
+ err = 0;
+ *idx = -1;
+ if (!au_opt_test(au_mntflags(sb), XINO))
+ goto out; /* no xino */
+
+ xi = au_sbr(sb, bindex)->br_xino;
+
+again:
+ spin_lock(&xi->xi_nondir.spin);
+ found = au_xinondir_find(xi, h_ino);
+ if (found == -1) {
+ empty = au_xinondir_find(xi, /*h_ino*/0);
+ if (empty == -1) {
+ empty = xi->xi_nondir.total;
+ err = au_xinondir_expand(xi);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+ xi->xi_nondir.array[empty] = h_ino;
+ *idx = empty;
+ } else {
+ spin_unlock(&xi->xi_nondir.spin);
+ wait_event(xi->xi_nondir.wqh,
+ xi->xi_nondir.array[found] != h_ino);
+ goto again;
+ }
+
+out_unlock:
+ spin_unlock(&xi->xi_nondir.spin);
+out:
+ return err;
+}
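
au_xinondir_enter() and au_xinondir_leave() serialize concurrent work on the same non-directory branch inode: a caller records h_ino in a free slot of xi_nondir.array, and a later caller that finds the same h_ino already recorded sleeps on xi_nondir.wqh until the slot is cleared. A compressed pthread analogue of that claim-or-wait idea, using a fixed slot array instead of the patch's growable one, illustrative only:

#include <pthread.h>
#include <stdio.h>

#define SLOTS 8

static unsigned long slots[SLOTS];	/* 0 means "free" */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static int find(unsigned long h_ino)
{
	for (int i = 0; i < SLOTS; i++)
		if (slots[i] == h_ino)
			return i;
	return -1;
}

/* claim h_ino, sleeping while another thread holds it; returns the slot */
static int nondir_enter(unsigned long h_ino)
{
	int idx;

	pthread_mutex_lock(&lock);
	while (find(h_ino) != -1)
		pthread_cond_wait(&cond, &lock);
	idx = find(0);			/* assume a slot is always free here */
	slots[idx] = h_ino;
	pthread_mutex_unlock(&lock);
	return idx;
}

static void nondir_leave(int idx)
{
	pthread_mutex_lock(&lock);
	slots[idx] = 0;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&cond);	/* wake whoever waited on this h_ino */
}

int main(void)
{
	int idx = nondir_enter(42);

	printf("claimed slot %d for h_ino 42\n", idx);
	nondir_leave(idx);
	return 0;
}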
+
+/* ---------------------------------------------------------------------- */
+
+int au_xino_path(struct seq_file *seq, struct file *file)
+{
+ int err;
+
+ err = au_seq_path(seq, &file->f_path);
+ if (unlikely(err))
+ goto out;
+
+#define Deleted "\\040(deleted)"
+ seq->count -= sizeof(Deleted) - 1;
+ AuDebugOn(memcmp(seq->buf + seq->count, Deleted,
+ sizeof(Deleted) - 1));
+#undef Deleted
+
+out:
+ return err;
+}
diff --git a/fs/dcache.c b/fs/dcache.c
index aac41adf4743..50eb587c1253 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1247,7 +1247,7 @@ enum d_walk_ret {
*
* The @enter() callbacks are called with d_lock held.
*/
-static void d_walk(struct dentry *parent, void *data,
+void d_walk(struct dentry *parent, void *data,
enum d_walk_ret (*enter)(void *, struct dentry *))
{
struct dentry *this_parent;
@@ -1352,6 +1352,7 @@ rename_retry:
seq = 1;
goto again;
}
+EXPORT_SYMBOL_GPL(d_walk);
struct check_mount {
struct vfsmount *mnt;
@@ -2845,6 +2846,7 @@ void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
write_sequnlock(&rename_lock);
}
+EXPORT_SYMBOL_GPL(d_exchange);
/**
* d_ancestor - search for an ancestor
diff --git a/fs/exec.c b/fs/exec.c
index bcf383730bea..a97bdea3622e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -109,6 +109,7 @@ bool path_noexec(const struct path *path)
return (path->mnt->mnt_flags & MNT_NOEXEC) ||
(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
+EXPORT_SYMBOL_GPL(path_noexec);
#ifdef CONFIG_USELIB
/*
diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig
index 3ff1772f612e..ca5c78bda750 100644
--- a/fs/fat/Kconfig
+++ b/fs/fat/Kconfig
@@ -74,6 +74,27 @@ config VFAT_FS
To compile this as a module, choose M here: the module will be called
vfat.
+config VFAT_FS_NO_DUALNAMES
+ bool "disable VFAT dual names support (patent workaround)"
+ depends on VFAT_FS
+ help
+ This option disables support for dual filenames on VFAT filesystems.
+ If this option is enabled then file creation will either put
+ a short (8.3) name or a long name on the file, but never both.
+ The field where a shortname would normally go is filled with
+ invalid characters such that it cannot be considered a valid
+ short filename.
+
+ That means that long filenames created with this option
+ enabled will not be accessible at all to operating systems
+ that do not understand the FAT long filename extensions.
+
+ Users considering disabling this option should consider the
+ implications of any patents that may exist on dual filenames
+ in VFAT.
+
+ If unsure, say N.
+
config FAT_DEFAULT_CODEPAGE
int "Default codepage for FAT"
depends on MSDOS_FS || VFAT_FS
@@ -100,6 +121,17 @@ config FAT_DEFAULT_IOCHARSET
Enable any character sets you need in File Systems/Native Language
Support.
+config VFAT_NO_CREATE_WITH_LONGNAMES
+ bool "Disable creating files with long names"
+ depends on VFAT_FS
+ default n
+ help
+ Set this to disable support for creating files or directories with
+ names longer than 8.3 (the original DOS maximum file name length).
+ For example, naming a file FILE1234.TXT is allowed, but creating or
+ renaming a file to FILE12345.TXT or FILE1234.TEXT is not.
+ Reading files with long file names is still permitted.
+
config FAT_DEFAULT_UTF8
bool "Enable FAT UTF-8 option by default"
depends on VFAT_FS
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 9d01db37183f..f997d538e84b 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -368,7 +368,8 @@ static int fat_parse_short(struct super_block *sb,
dotoffset = 1;
}
- memcpy(work, de->name, sizeof(work));
+ memcpy(work, de->name, sizeof(de->name));
+
/* For an explanation of the special treatment of 0x05 in
* filenames, see msdos_format_name in namei_msdos.c
*/
@@ -500,6 +501,13 @@ parse_record:
goto end_of_dir;
}
+ /*
+ * The FAT_NO_83NAME flag is used to mark files
+ * created with no 8.3 short name
+ */
+ if (de->lcase & FAT_NO_83NAME)
+ goto compare_longname;
+
/* Never prepend '.' to hidden files here.
* That is done only for msdos mounts (and only when
* 'dotsOK=yes'); if we are executing here, it is in the
@@ -513,6 +521,7 @@ parse_record:
if (fat_name_match(sbi, name, name_len, bufname, len))
goto found;
+compare_longname:
if (nr_slots) {
void *longname = unicode + FAT_MAX_UNI_CHARS;
int size = PATH_MAX - FAT_MAX_UNI_SIZE;
@@ -604,6 +613,8 @@ parse_record:
if (de->attr != ATTR_EXT && IS_FREE(de->name))
goto record_end;
} else {
+ if (de->lcase & FAT_NO_83NAME)
+ goto record_end;
if ((de->attr & ATTR_VOLUME) || IS_FREE(de->name))
goto record_end;
}
@@ -962,6 +973,10 @@ int fat_scan(struct inode *dir, const unsigned char *name,
sinfo->bh = NULL;
while (fat_get_short_entry(dir, &sinfo->slot_off, &sinfo->bh,
&sinfo->de) >= 0) {
+ /* skip files marked as having no 8.3 short name */
+ if (sinfo->de->lcase & FAT_NO_83NAME)
+ continue;
+
if (!strncmp(sinfo->de->name, name, MSDOS_NAME)) {
sinfo->slot_off -= sizeof(*sinfo->de);
sinfo->nr_slots = 1;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 996c8c25e9c6..2a84904c2507 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -21,6 +21,7 @@
#include <linux/namei.h>
#include <linux/kernel.h>
#include <linux/iversion.h>
+#include <linux/random.h>
#include "fat.h"
static inline unsigned long vfat_d_version(struct dentry *dentry)
@@ -335,6 +336,17 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
int is_shortname;
struct shortname_info base_info, ext_info;
+ unsigned opts_shortname = opts->shortname;
+
+#ifdef CONFIG_VFAT_FS_NO_DUALNAMES
+ /*
+ * When we do not have dualnames, we want to maximise the
+ * chance that a file will be able to be represented with just
+ * a 8.3 entry. We can do that by using the WINNT case
+ * handling extensions to FAT.
+ */
+ opts_shortname = VFAT_SFN_CREATE_WINNT;
+#endif
is_shortname = 1;
INIT_SHORTNAME_INFO(&base_info);
@@ -443,13 +455,22 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
memcpy(name_res, base, baselen);
memcpy(name_res + 8, ext, extlen);
*lcase = 0;
+
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+ if (is_shortname == 0)
+ return -ENAMETOOLONG;
+ if (!base_info.valid || !ext_info.valid)
+ return -EINVAL;
+ opts_shortname = VFAT_SFN_CREATE_WINNT;
+#endif
+
if (is_shortname && base_info.valid && ext_info.valid) {
if (vfat_find_form(dir, name_res) == 0)
return -EEXIST;
- if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
+ if (opts_shortname & VFAT_SFN_CREATE_WIN95) {
return (base_info.upper && ext_info.upper);
- } else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
+ } else if (opts_shortname & VFAT_SFN_CREATE_WINNT) {
if ((base_info.upper || base_info.lower) &&
(ext_info.upper || ext_info.lower)) {
if (!base_info.upper && base_info.lower)
@@ -575,6 +596,66 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
return 0;
}
+#ifdef CONFIG_VFAT_FS_NO_DUALNAMES
+/*
+ * This function creates a dummy 8.3 entry which is as compatible as
+ * possible with existing FAT devices, while not being a valid
+ * filename under windows or Linux
+ */
+static void vfat_build_dummy_83_buffer(struct inode *dir, char *msdos_name,
+ int is_dir)
+{
+ /*
+ * These characters are all invalid in 8.3 names, plus have
+ * been shown to be harmless on all tested devices
+ */
+ const char invalidchar[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x0B,
+ 0x0C, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A,
+ 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x22, 0x2a,
+ 0x3a, 0x3c, 0x3e, 0x3f, 0x5b, 0x5d, 0x7c };
+ int i, tilde_pos, slash_pos;
+ u32 rand_num = prandom_u32();
+
+ /* We need a '~' in the prefix to make Win98 happy. */
+ tilde_pos = rand_num % 8;
+ rand_num >>= 3;
+
+ /*
+ * The '/' makes sure that even unpatched Linux systems can't
+ * get at files via the 8.3 entry. Don't put a '/' in
+ * directories as it can cause problems with some
+ * photo frames.
+ */
+ if (is_dir)
+ slash_pos = -1;
+ else {
+ slash_pos = (tilde_pos + 1 + rand_num % 7) % 8;
+ rand_num >>= 3;
+ }
+
+ /*
+ * fill in the first 8 bytes with invalid characters. Note
+ * that we need to be careful not to run out of randomness. We
+ * leave the 3 byte extension in place as some cheap MP3
+ * players need them.
+ */
+ for (i = 0; i < 8; i++) {
+ if (i == tilde_pos)
+ msdos_name[i] = '~';
+ else if (i == slash_pos)
+ msdos_name[i] = '/';
+ else {
+ msdos_name[i] =
+ invalidchar[rand_num % sizeof(invalidchar)];
+ rand_num /= sizeof(invalidchar);
+ if (rand_num < sizeof(invalidchar))
+ rand_num = prandom_u32();
+ }
+ }
+}
+#endif
+
static int vfat_build_slots(struct inode *dir, const unsigned char *name,
int len, int is_dir, int cluster,
struct timespec64 *ts,
@@ -582,15 +663,19 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
struct fat_mount_options *opts = &sbi->options;
- struct msdos_dir_slot *ps;
struct msdos_dir_entry *de;
- unsigned char cksum, lcase;
+ unsigned char lcase;
unsigned char msdos_name[MSDOS_NAME];
wchar_t *uname;
__le16 time, date;
u8 time_cs;
- int err, ulen, usize, i;
+ int err, ulen, usize;
+#ifndef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+ int i;
+ struct msdos_dir_slot *ps;
+ unsigned char cksum;
loff_t offset;
+#endif
*nr_slots = 0;
@@ -617,6 +702,16 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
goto shortname;
}
+#ifdef CONFIG_VFAT_FS_NO_DUALNAMES
+ printk_once(KERN_INFO
+ "VFAT: not creating 8.3 short filenames for long names\n");
+ vfat_build_dummy_83_buffer(dir, msdos_name, is_dir);
+ lcase = FAT_NO_83NAME;
+#endif
+
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+ de = (struct msdos_dir_entry *)slots;
+#else
/* build the entry of long file name */
cksum = fat_checksum(msdos_name);
@@ -634,6 +729,7 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
}
slots[0].id |= 0x40;
de = (struct msdos_dir_entry *)ps;
+#endif
shortname:
/* build the entry of 8.3 alias name */
@@ -1058,7 +1154,11 @@ static struct dentry *vfat_mount(struct file_system_type *fs_type,
static struct file_system_type vfat_fs_type = {
.owner = THIS_MODULE,
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+ .name = "lfat",
+#else
.name = "vfat",
+#endif
.mount = vfat_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
@@ -1078,6 +1178,9 @@ static void __exit exit_vfat_fs(void)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFAT filesystem support");
MODULE_AUTHOR("Gordon Chaffee");
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+MODULE_ALIAS("lfat");
+#endif
module_init(init_vfat_fs)
module_exit(exit_vfat_fs)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 083185174c6d..2072f690b121 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -32,7 +32,7 @@
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
-static int setfl(int fd, struct file * filp, unsigned long arg)
+int setfl(int fd, struct file * filp, unsigned long arg)
{
struct inode * inode = file_inode(filp);
int error = 0;
@@ -63,6 +63,8 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
if (filp->f_op->check_flags)
error = filp->f_op->check_flags(arg);
+ if (!error && filp->f_op->setfl)
+ error = filp->f_op->setfl(filp, arg);
if (error)
return error;
@@ -83,6 +85,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
out:
return error;
}
+EXPORT_SYMBOL_GPL(setfl);
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
diff --git a/fs/file_table.c b/fs/file_table.c
index 5679e7fcb6b0..961eec3df1eb 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -161,6 +161,7 @@ over:
}
return ERR_PTR(-ENFILE);
}
+EXPORT_SYMBOL_GPL(alloc_empty_file);
/*
* Variant of alloc_empty_file() that doesn't check and modify nr_files.
@@ -323,6 +324,7 @@ void flush_delayed_fput(void)
{
delayed_fput(NULL);
}
+EXPORT_SYMBOL_GPL(flush_delayed_fput);
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
@@ -365,6 +367,7 @@ void __fput_sync(struct file *file)
}
EXPORT_SYMBOL(fput);
+EXPORT_SYMBOL_GPL(__fput_sync);
void __init files_init(void)
{
diff --git a/fs/inode.c b/fs/inode.c
index 73432e64f874..6e51d5f9b9f3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL(generic_update_time);
* This does the actual work of updating an inodes time or version. Must have
* had called mnt_want_write() before calling this.
*/
-static int update_time(struct inode *inode, struct timespec64 *time, int flags)
+int update_time(struct inode *inode, struct timespec64 *time, int flags)
{
int (*update_time)(struct inode *, struct timespec64 *, int);
@@ -1666,6 +1666,7 @@ static int update_time(struct inode *inode, struct timespec64 *time, int flags)
return update_time(inode, time, flags);
}
+EXPORT_SYMBOL_GPL(update_time);
/**
* touch_atime - update the access time
diff --git a/fs/namespace.c b/fs/namespace.c
index 678ef175d63a..8c4d3d56f2df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -434,6 +434,7 @@ void __mnt_drop_write(struct vfsmount *mnt)
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
+EXPORT_SYMBOL_GPL(__mnt_drop_write);
/**
* mnt_drop_write - give up write access to a mount
@@ -768,6 +769,13 @@ static inline int check_mnt(struct mount *mnt)
return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
+/* for aufs, CONFIG_AUFS_BR_FUSE */
+int is_current_mnt_ns(struct vfsmount *mnt)
+{
+ return check_mnt(real_mount(mnt));
+}
+EXPORT_SYMBOL_GPL(is_current_mnt_ns);
+
/*
* vfsmount lock must be held for write
*/
@@ -1836,6 +1844,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
}
return 0;
}
+EXPORT_SYMBOL_GPL(iterate_mounts);
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 5f93cfacb3d1..73135eb5d0ae 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -39,6 +39,19 @@ config NFS_V2
If unsure, say Y.
+config NFS_DEF_FILE_IO_SIZE
+ int "Default size for NFS I/O read and write at runtime"
+ depends on NFS_FS
+ default "4096"
+ help
+ To change the default rsize and wsize supported by the NFS client,
+ adjust NFS_DEF_FILE_IO_SIZE. 64KB is a typical maximum, but some
+ servers can support a megabyte or more. The default is left at 4096
+ bytes, which is reasonable for NFS over UDP. However, for some
+ systems, setting a smaller value like 1024 can work around
+ limitations in the driver or hardware and result in overall
+ improved performance.
+
config NFS_V3
tristate "NFS client support for NFS version 3"
depends on NFS_FS
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 90d71fda65ce..43c9cb44b7cb 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -70,7 +69,7 @@ static const struct rpc_version *nfs_version[5] = {
[4] = NULL,
};
-const struct rpc_program nfs_program = {
+struct rpc_program nfs_program = {
.name = "nfs",
.number = NFS_PROGRAM,
.nrvers = ARRAY_SIZE(nfs_version),
@@ -161,6 +160,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
goto error_dealloc;
clp->rpc_ops = clp->cl_nfs_mod->rpc_ops;
+ clp->nfs_prog = cl_init->nfs_prog;
refcount_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
@@ -311,6 +311,9 @@ again:
/* Match nfsv4 minorversion */
if (clp->cl_minorversion != data->minorversion)
continue;
+ if (clp->nfs_prog != data->nfs_prog)
+ continue;
+
/* Match the full socket address */
if (!rpc_cmp_addr_port(sap, clap))
/* Match all xprt_switch full socket addresses */
@@ -514,6 +517,10 @@ int nfs_create_rpc_client(struct nfs_client *clp,
if (!IS_ERR(clp->cl_rpcclient))
return 0;
+ if (clp->nfs_prog)
+ nfs_program.number = clp->nfs_prog;
+ else
+ nfs_program.number = NFS_PROGRAM;
clnt = rpc_create(&args);
if (IS_ERR(clnt)) {
dprintk("%s: cannot create RPC client. Error = %ld\n",
@@ -652,6 +659,7 @@ static int nfs_init_server(struct nfs_server *server,
.proto = data->nfs_server.protocol,
.net = data->net,
.timeparms = &timeparms,
+ .nfs_prog = data->nfs_prog,
};
struct nfs_client *clp;
int error;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b1e577302518..6ee35be43803 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -83,6 +83,7 @@ struct nfs_client_initdata {
u32 minorversion;
struct net *net;
const struct rpc_timeout *timeparms;
+ int nfs_prog;
};
/*
@@ -94,6 +95,8 @@ struct nfs_parsed_mount_data {
unsigned int timeo, retrans;
unsigned int acregmin, acregmax,
acdirmin, acdirmax;
+ int nfs_prog;
+ int mount_prog;
unsigned int namlen;
unsigned int options;
unsigned int bsize;
@@ -150,11 +153,11 @@ struct nfs_mount_info {
struct nfs_fh *mntfh;
};
-extern int nfs_mount(struct nfs_mount_request *info);
+extern int nfs_mount(struct nfs_mount_request *info, int prog);
extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
-extern const struct rpc_program nfs_program;
+extern struct rpc_program nfs_program;
extern void nfs_clients_init(struct net *net);
extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
int nfs_create_rpc_client(struct nfs_client *, const struct nfs_client_initdata *, rpc_authflavor_t);
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index d979ff4fee7e..7035dc1797c2 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -66,7 +66,7 @@ enum {
MOUNTPROC3_EXPORT = 5,
};
-static const struct rpc_program mnt_program;
+static struct rpc_program mnt_program;
/*
* Defined by OpenGroup XNFS Version 3W, chapter 8
@@ -143,7 +143,7 @@ struct mnt_fhstatus {
* with the list from the server or a faked-up list if the server didn't
* provide one.
*/
-int nfs_mount(struct nfs_mount_request *info)
+int nfs_mount(struct nfs_mount_request *info, int m_prog)
{
struct mountres result = {
.fh = info->fh,
@@ -177,6 +177,7 @@ int nfs_mount(struct nfs_mount_request *info)
if (info->noresvport)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+ mnt_program.number = m_prog;
mnt_clnt = rpc_create(&args);
if (IS_ERR(mnt_clnt))
goto out_clnt_err;
@@ -530,7 +531,7 @@ static const struct rpc_version *mnt_version[] = {
static struct rpc_stat mnt_stats;
-static const struct rpc_program mnt_program = {
+static struct rpc_program mnt_program = {
.name = "mount",
.number = NFS_MNT_PROGRAM,
.nrvers = ARRAY_SIZE(mnt_version),
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 15c025c1a305..7acbb5b73f6c 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -102,6 +102,8 @@ enum {
Opt_mountport,
Opt_mountvers,
Opt_minorversion,
+ Opt_mountprog,
+ Opt_nfsprog,
/* Mount options that take string arguments */
Opt_nfsvers,
@@ -167,6 +169,8 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_mountport, "mountport=%s" },
{ Opt_mountvers, "mountvers=%s" },
{ Opt_minorversion, "minorversion=%s" },
+ { Opt_mountprog, "mountprog=%s" },
+ { Opt_nfsprog, "nfsprog=%s" },
{ Opt_nfsvers, "nfsvers=%s" },
{ Opt_nfsvers, "vers=%s" },
@@ -922,6 +926,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
data->acregmax = NFS_DEF_ACREGMAX;
data->acdirmin = NFS_DEF_ACDIRMIN;
data->acdirmax = NFS_DEF_ACDIRMAX;
+ data->nfs_prog = NFS_PROGRAM;
+ data->mount_prog = NFS_MNT_PROGRAM;
data->mount_server.port = NFS_UNSPEC_PORT;
data->nfs_server.port = NFS_UNSPEC_PORT;
data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
@@ -1379,6 +1385,26 @@ static int nfs_parse_mount_options(char *raw,
goto out_invalid_value;
mnt->acdirmax = option;
break;
+ case Opt_mountprog:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = kstrtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->mount_prog = option;
+ break;
+ case Opt_nfsprog:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = kstrtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->nfs_prog = option;
+ break;
case Opt_actimeo:
if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
@@ -1778,7 +1804,7 @@ static int nfs_request_mount(struct nfs_parsed_mount_data *args,
* Now ask the mount server to map our export path
* to a file handle.
*/
- status = nfs_mount(&request);
+ status = nfs_mount(&request, args->mount_prog);
if (status != 0) {
dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
request.hostname, status);
@@ -2087,6 +2113,9 @@ static int nfs23_validate_mount_data(void *options,
}
break;
+ case 7:
+ args->nfs_prog = (data->version >= 7) ? data->nfs_prog : NFS_PROGRAM;
+ break;
default:
return NFS_TEXT_DATA;
}
@@ -2149,6 +2178,7 @@ static int nfs_validate_text_mount_data(void *options,
int max_namelen = PAGE_SIZE;
int max_pathlen = NFS_MAXPATHLEN;
struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+ args->nfs_prog = NFS_PROGRAM;
if (nfs_parse_mount_options((char *)options, args) == 0)
return -EINVAL;
@@ -2775,6 +2805,8 @@ static int nfs4_validate_mount_data(void *options,
args->version = 4;
+ args->nfs_prog = NFS_PROGRAM;
+
switch (data->version) {
case 1:
if (data->host_addrlen > sizeof(args->nfs_server.address))
diff --git a/fs/notify/group.c b/fs/notify/group.c
index c03b83662876..817f22c6e191 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -23,6 +23,7 @@
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
+#include <linux/module.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
@@ -112,6 +113,7 @@ void fsnotify_get_group(struct fsnotify_group *group)
{
refcount_inc(&group->refcnt);
}
+EXPORT_SYMBOL_GPL(fsnotify_get_group);
/*
* Drop a reference to a group. Free it if it's through.
@@ -121,6 +123,7 @@ void fsnotify_put_group(struct fsnotify_group *group)
if (refcount_dec_and_test(&group->refcnt))
fsnotify_final_destroy_group(group);
}
+EXPORT_SYMBOL_GPL(fsnotify_put_group);
/*
* Create a new fsnotify_group and hold a reference for the group returned.
@@ -150,6 +153,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
return group;
}
+EXPORT_SYMBOL_GPL(fsnotify_alloc_group);
int fsnotify_fasync(int fd, struct file *file, int on)
{
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d2dd16cb5989..cf709b7d611a 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -289,6 +289,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
queue_delayed_work(system_unbound_wq, &reaper_work,
FSNOTIFY_REAPER_DELAY);
}
+EXPORT_SYMBOL_GPL(fsnotify_put_mark);
/*
* Get mark reference when we found the mark via lockless traversal of object
@@ -443,6 +444,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
mutex_unlock(&group->mark_mutex);
fsnotify_free_mark(mark);
}
+EXPORT_SYMBOL_GPL(fsnotify_destroy_mark);
/*
* Sorting function for lists of fsnotify marks.
@@ -658,6 +660,7 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
mutex_unlock(&group->mark_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(fsnotify_add_mark);
/*
* Given a list of marks, find the mark associated with given group. If found
@@ -781,6 +784,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
fsnotify_get_group(group);
mark->group = group;
}
+EXPORT_SYMBOL_GPL(fsnotify_init_mark);
/*
* Destroy all marks in destroy_list, waits for SRCU period to finish before
diff --git a/fs/open.c b/fs/open.c
index a00350018a47..fc17a6a34071 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -64,6 +64,7 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
inode_unlock(dentry->d_inode);
return ret;
}
+EXPORT_SYMBOL_GPL(do_truncate);
long vfs_truncate(const struct path *path, loff_t length)
{
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f5ed9512d193..e8f8e59d93bb 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2014,7 +2014,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
- *path = vma->vm_file->f_path;
+ *path = vma_pr_or_file(vma)->f_path;
path_get(path);
rc = 0;
}
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 3b63be64e436..fb9913bf3d10 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -45,7 +45,10 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
file = region->vm_file;
if (file) {
- struct inode *inode = file_inode(region->vm_file);
+ struct inode *inode;
+
+ file = vmr_pr_or_file(region);
+ inode = file_inode(file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 91bd2ff0c62c..d18f59db5cd4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -305,7 +305,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
const char *name = NULL;
if (file) {
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode;
+
+ file = vma_pr_or_file(vma);
+ inode = file_inode(file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -1758,7 +1761,7 @@ static int show_numa_map(struct seq_file *m, void *v)
struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
struct vm_area_struct *vma = v;
struct numa_maps *md = &numa_priv->md;
- struct file *file = vma->vm_file;
+ struct file *file = vma_pr_or_file(vma);
struct mm_struct *mm = vma->vm_mm;
struct mm_walk walk = {
.hugetlb_entry = gather_hugetlb_stats,
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 0b63d68dedb2..400d1c594ceb 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -155,7 +155,10 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
file = vma->vm_file;
if (file) {
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode;
+
+ file = vma_pr_or_file(vma);
+ inode = file_inode(file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
diff --git a/fs/read_write.c b/fs/read_write.c
index 3d3194e32201..c08121f06ec7 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -459,6 +459,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
return ret;
}
+EXPORT_SYMBOL_GPL(vfs_read);
static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
@@ -489,6 +490,30 @@ ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
return -EINVAL;
}
+vfs_readf_t vfs_readf(struct file *file)
+{
+ const struct file_operations *fop = file->f_op;
+
+ if (fop->read)
+ return fop->read;
+ if (fop->read_iter)
+ return new_sync_read;
+ return ERR_PTR(-ENOSYS);
+}
+EXPORT_SYMBOL_GPL(vfs_readf);
+
+vfs_writef_t vfs_writef(struct file *file)
+{
+ const struct file_operations *fop = file->f_op;
+
+ if (fop->write)
+ return fop->write;
+ if (fop->write_iter)
+ return new_sync_write;
+ return ERR_PTR(-ENOSYS);
+}
+EXPORT_SYMBOL_GPL(vfs_writef);
+
ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
{
mm_segment_t old_fs;
@@ -557,6 +582,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
return ret;
}
+EXPORT_SYMBOL_GPL(vfs_write);
static inline loff_t file_pos_read(struct file *file)
{
diff --git a/fs/splice.c b/fs/splice.c
index c38c7e7a49c9..70bc50558848 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -837,8 +837,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
/*
* Attempt to initiate a splice from pipe to file.
*/
-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
- loff_t *ppos, size_t len, unsigned int flags)
+long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
{
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
loff_t *, size_t, unsigned int);
@@ -850,13 +850,14 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
return splice_write(pipe, out, ppos, len, flags);
}
+EXPORT_SYMBOL_GPL(do_splice_from);
/*
* Attempt to initiate a splice from a file to a pipe.
*/
-static long do_splice_to(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
+long do_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
{
ssize_t (*splice_read)(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
@@ -879,6 +880,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
return splice_read(in, ppos, pipe, len, flags);
}
+EXPORT_SYMBOL_GPL(do_splice_to);
/**
* splice_direct_to_actor - splices data directly between two non-pipes
diff --git a/fs/sync.c b/fs/sync.c
index b54e0541ad89..ffd7ea43831e 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -28,7 +28,7 @@
* wait == 1 case since in that case write_inode() functions do
* sync_dirty_buffer() and thus effectively write one block at a time.
*/
-static int __sync_filesystem(struct super_block *sb, int wait)
+int __sync_filesystem(struct super_block *sb, int wait)
{
if (wait)
sync_inodes_sb(sb);
@@ -39,6 +39,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
sb->s_op->sync_fs(sb, wait);
return __sync_blockdev(sb->s_bdev, wait);
}
+EXPORT_SYMBOL_GPL(__sync_filesystem);
/*
* Write out and wait upon all dirty data associated with this
diff --git a/fs/xattr.c b/fs/xattr.c
index 0d6a6a4af861..7ce4701b7289 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -295,6 +295,7 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
*xattr_value = value;
return error;
}
+EXPORT_SYMBOL_GPL(vfs_getxattr_alloc);
ssize_t
__vfs_getxattr(struct dentry *dentry, struct inode *inode, const char *name,
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100644
index 000000000000..408570fc7a5e
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,171 @@
+#
+# yaffs file system configurations
+#
+
+config YAFFS_FS
+ tristate "yaffs2 file system support"
+ default n
+ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+ yaffs2, or Yet Another Flash File System, is a file system
+ optimised for NAND Flash chips.
+
+ To compile the yaffs2 file system support as a module, choose M
+ here: the module will be called yaffs2.
+
+ If unsure, say N.
+
+ Further information on yaffs2 is available at
+ <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+ bool "512 byte / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable yaffs1 support -- yaffs for 512 byte / page devices
+
+ Not needed for 2K-page devices.
+
+ If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+ bool "Use older-style on-NAND data format with pageStatus byte"
+ depends on YAFFS_YAFFS1
+ default n
+ help
+
+ Older-style on-NAND data format has a "pageStatus" byte to record
+ chunk/page state. This byte is zero when the page is discarded.
+ Choose this option if you have existing on-NAND data using this
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+ older-style format. See notes on tags formats and MTD versions
+ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+config YAFFS_DOES_ECC
+ bool "Lets yaffs do its own ECC"
+ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This enables yaffs to use its own ECC functions instead of using
+ the ones from the generic MTD-NAND driver.
+
+ If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This makes yaffs_ecc.c use the same ecc byte order as Steven
+ Hill's nand_ecc.c. If not set, then you get the same ecc byte
+ order as SmartMedia.
+
+ If unsure, say N.
+
+config YAFFS_YAFFS2
+ bool "2048 byte (or larger) / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
+
+ If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+ bool "Autoselect yaffs2 format"
+ depends on YAFFS_YAFFS2
+ default y
+ help
+ Without this, you need to explicitly use yaffs2 as the file
+ system type. With this, you can say "yaffs" and yaffs or yaffs2
+ will be used depending on the device page size (yaffs on
+ 512-byte page devices, yaffs2 on 2K page devices).
+
+ If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+ bool "Disable yaffs from doing ECC on tags by default"
+ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+ This stops yaffs from doing its own ECC calculations on tags by
+ default, relying on the MTD instead.
+ This behavior can also be overridden with tags_ecc_on and
+ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bool "Force chunk erase check"
+ depends on YAFFS_FS
+ default n
+ help
+ Normally yaffs only checks chunks before writing until an erased
+ chunk is found. This helps to detect any partially written
+ chunks that might have happened due to power loss.
+
+ Enabling this forces on the test that chunks are erased in flash
+ before writing to them. This takes more time but is potentially
+ a bit more secure.
+
+ Setting this to Y is suggested during development while ironing out
+ driver issues etc. Set it to N if you want faster writing.
+
+ If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+ bool "Empty lost and found on boot"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is enabled then the contents of lost+found are
+ automatically deleted at mount time.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+ bool "Disable yaffs2 block refreshing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then block refreshing is disabled.
+ Block refreshing infrequently rewrites the oldest block in
+ a yaffs2 file system to guard against long-term data loss.
+ This is particularly useful for MLC flash.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+ bool "Disable yaffs2 background processing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then background processing is disabled.
+ Background processing makes many foreground activities faster.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BAD_BLOCK_MARKING
+ bool "Disable yaffs2 bad block marking"
+ depends on YAFFS_FS
+ default n
+ help
+ Useful during early flash bring-up, when driver problems could
+ otherwise cause large numbers of blocks to be marked bad.
+
+ If unsure, say N.
+
+config YAFFS_XATTR
+ bool "Enable yaffs2 xattr support"
+ depends on YAFFS_FS
+ default y
+ help
+ If this is set then yaffs2 will provide xattr support.
+ If unsure, say Y.
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100644
index 000000000000..f9a9fb1bfdd4
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
+yaffs-y += yaffs_mtdif.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_summary.o
+yaffs-y += yaffs_verify.o
+
diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
new file mode 100644
index 000000000000..c8f2861c53ee
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,357 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+/*
+ * Each entry in yaffs_tnode_list and yaffs_obj_list holds a block
+ * of approximately 100 objects that are then allocated singly.
+ * This is basically a simplified slab allocator.
+ *
+ * We don't use the Linux slab allocator because slab does not allow
+ * us to dump all the objects in one hit when we do a umount and tear
+ * down all the tnodes and objects. slab requires that we first free
+ * the individual objects.
+ *
+ * Once yaffs has been mainlined I shall try to motivate for a change
+ * to slab to provide the extra features we need here.
+ */
+
+struct yaffs_tnode_list {
+ struct yaffs_tnode_list *next;
+ struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+ struct yaffs_obj_list *next;
+ struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+ int n_tnodes_created;
+ struct yaffs_tnode *free_tnodes;
+ int n_free_tnodes;
+ struct yaffs_tnode_list *alloc_tnode_list;
+
+ int n_obj_created;
+ struct list_head free_objs;
+ int n_free_objects;
+
+ struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->alloc_tnode_list) {
+ tmp = allocator->alloc_tnode_list->next;
+
+ kfree(allocator->alloc_tnode_list->tnodes);
+ kfree(allocator->alloc_tnode_list);
+ allocator->alloc_tnode_list = tmp;
+ }
+
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->alloc_tnode_list = NULL;
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ int i;
+ struct yaffs_tnode *new_tnodes;
+ u8 *mem;
+ struct yaffs_tnode *curr;
+ struct yaffs_tnode *next;
+ struct yaffs_tnode_list *tnl;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_tnodes < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+ mem = (u8 *) new_tnodes;
+
+ if (!new_tnodes) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: Could not allocate Tnodes");
+ return YAFFS_FAIL;
+ }
+
+ /* New hookup for wide tnodes */
+ for (i = 0; i < n_tnodes - 1; i++) {
+ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+ curr->internal[0] = next;
+ }
+
+ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+ curr->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+ allocator->n_free_tnodes += n_tnodes;
+ allocator->n_tnodes_created += n_tnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+ if (!tnl) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Could not add tnodes to management list");
+ return YAFFS_FAIL;
+ } else {
+ tnl->tnodes = new_tnodes;
+ tnl->next = allocator->alloc_tnode_list;
+ allocator->alloc_tnode_list = tnl;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
+
+ return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode *tn = NULL;
+
+ if (!allocator) {
+ BUG();
+ return NULL;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_tnodes)
+ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+ if (allocator->free_tnodes) {
+ tn = allocator->free_tnodes;
+ allocator->free_tnodes = allocator->free_tnodes->internal[0];
+ allocator->n_free_tnodes--;
+ }
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ if (tn) {
+ tn->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = tn;
+ allocator->n_free_tnodes++;
+ }
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+/*--------------- yaffs_obj allocation ------------------------
+ *
+ * Free yaffs_objs are stored in a list using obj->siblings.
+ * The blocks of allocated objects are stored in a linked list.
+ */
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->allocated_obj_list = NULL;
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ struct yaffs_obj_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->allocated_obj_list) {
+ tmp = allocator->allocated_obj_list->next;
+ kfree(allocator->allocated_obj_list->objects);
+ kfree(allocator->allocated_obj_list);
+ allocator->allocated_obj_list = tmp;
+ }
+
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+ allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ int i;
+ struct yaffs_obj *new_objs;
+ struct yaffs_obj_list *list;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_obj < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+ if (!new_objs || !list) {
+ kfree(new_objs);
+ new_objs = NULL;
+ kfree(list);
+ list = NULL;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Could not allocate more objects");
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+ for (i = 0; i < n_obj; i++)
+ list_add(&new_objs[i].siblings, &allocator->free_objs);
+
+ allocator->n_free_objects += n_obj;
+ allocator->n_obj_created += n_obj;
+
+ /* Now add this bunch of Objects to a list for freeing up. */
+
+ list->objects = new_objs;
+ list->next = allocator->allocated_obj_list;
+ allocator->allocated_obj_list = list;
+
+ return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = NULL;
+ struct list_head *lh;
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return obj;
+ }
+
+ /* If there are none left make more */
+ if (list_empty(&allocator->free_objs))
+ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+ if (!list_empty(&allocator->free_objs)) {
+ lh = allocator->free_objs.next;
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ list_del_init(lh);
+ allocator->n_free_objects--;
+ }
+
+ return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ /* Link into the free list. */
+ list_add(&obj->siblings, &allocator->free_objs);
+ allocator->n_free_objects++;
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+
+ if (!dev->allocator) {
+ BUG();
+ return;
+ }
+
+ yaffs_deinit_raw_tnodes(dev);
+ yaffs_deinit_raw_objs(dev);
+ kfree(dev->allocator);
+ dev->allocator = NULL;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator;
+
+ if (dev->allocator) {
+ BUG();
+ return;
+ }
+
+ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+ if (allocator) {
+ dev->allocator = allocator;
+ yaffs_init_raw_tnodes(dev);
+ yaffs_init_raw_objs(dev);
+ }
+}
+
diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
new file mode 100644
index 000000000000..a8cc3226421f
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
new file mode 100644
index 000000000000..fdc7f2162ce6
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,124 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+ obj->yst_uid = oh->yst_uid;
+ obj->yst_gid = oh->yst_gid;
+ obj->yst_atime = oh->yst_atime;
+ obj->yst_mtime = oh->yst_mtime;
+ obj->yst_ctime = oh->yst_ctime;
+ obj->yst_rdev = oh->yst_rdev;
+}
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+ oh->yst_uid = obj->yst_uid;
+ oh->yst_gid = obj->yst_gid;
+ oh->yst_atime = obj->yst_atime;
+ oh->yst_mtime = obj->yst_mtime;
+ oh->yst_ctime = obj->yst_ctime;
+ oh->yst_rdev = obj->yst_rdev;
+
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+ obj->yst_mtime = Y_CURRENT_TIME;
+ if (do_a)
+ obj->yst_atime = obj->yst_mtime;
+ if (do_c)
+ obj->yst_ctime = obj->yst_mtime;
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+ yaffs_load_current_time(obj, 1, 1);
+ obj->yst_rdev = rdev;
+ obj->yst_uid = uid;
+ obj->yst_gid = gid;
+}
+
+static loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+ YCHAR *alias = NULL;
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.file_variant.file_size;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = obj->variant.symlink_variant.alias;
+ if (!alias)
+ return 0;
+ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+ default:
+ return 0;
+ }
+}
+
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = ia_uid_read(attr);
+ if (valid & ATTR_GID)
+ obj->yst_gid = ia_gid_read(attr);
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_resize_file(obj, attr->ia_size);
+
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ ia_uid_write(attr, obj->yst_uid);
+ valid |= ATTR_UID;
+ ia_gid_write(attr, obj->yst_gid);
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
new file mode 100644
index 000000000000..121d99d38b48
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,71 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+
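+/*
+ * On kernels >= 3.5 struct iattr carries kuid_t/kgid_t, so these wrappers
+ * convert to and from the plain uid_t/gid_t values used by the yaffs core.
+ */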
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+ return from_kuid(&init_user_ns, iattr->ia_uid);
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+ return from_kgid(&init_user_ns, iattr->ia_gid);
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+ iattr->ia_uid = make_kuid(&init_user_ns, uid);
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+ iattr->ia_gid = make_kgid(&init_user_ns, gid);
+}
+#else
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+ return iattr->ia_uid;
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+ return iattr->ia_gid;
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+ iattr->ia_uid = uid;
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+ iattr->ia_gid = gid;
+}
+#endif
+
+#endif
diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
new file mode 100644
index 000000000000..4440e930d6bc
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,97 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "BlockBits block %d is not valid",
+ blk);
+ BUG();
+ }
+ return dev->chunk_bits +
+ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+ chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Chunk Id (%d:%d) invalid",
+ blk, chunk);
+ BUG();
+ }
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
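+/* Returns non-zero if any chunk in the block is still marked as in use. */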
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++) {
+ if (*blk_bits)
+ return 1;
+ blk_bits++;
+ }
+ return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ int n = 0;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+ n += hweight8(*blk_bits);
+
+ return n;
+}
diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
new file mode 100644
index 000000000000..e26b37d89ae4
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 000000000000..e739fb4a104b
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,474 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
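+/*
+ * Header written at the start of every checkpoint chunk so that stale or
+ * corrupt checkpoint data can be detected when it is read back.
+ */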
+struct yaffs_checkpt_chunk_hdr {
+ int version;
+ int seq;
+ u32 sum;
+ u32 xor;
+};
+
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+ return chunk - dev->chunk_offset;
+}
+
+static int apply_block_offset(struct yaffs_dev *dev, int block)
+{
+ return block - dev->block_offset;
+}
+
+static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_chunk_hdr hdr;
+
+ hdr.version = YAFFS_CHECKPOINT_VERSION;
+ hdr.seq = dev->checkpt_page_seq;
+ hdr.sum = dev->checkpt_sum;
+ hdr.xor = dev->checkpt_xor;
+
+ dev->checkpt_byte_offs = sizeof(hdr);
+
+ memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr));
+}
+
+static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_chunk_hdr hdr;
+
+ memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr));
+
+ dev->checkpt_byte_offs = sizeof(hdr);
+
+ return hdr.version == YAFFS_CHECKPOINT_VERSION &&
+ hdr.seq == dev->checkpt_page_seq &&
+ hdr.sum == dev->checkpt_sum &&
+ hdr.xor == dev->checkpt_xor;
+}
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpt blocks_avail = %d", blocks_avail);
+
+ return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (!dev->drv.drv_erase_fn)
+ return 0;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checking blocks %d to %d",
+ dev->internal_start_block, dev->internal_end_block);
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ int offset_i = apply_block_offset(dev, i);
+ int result;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "erasing checkpt block %d", i);
+
+ dev->n_erasures++;
+
+ result = dev->drv.drv_erase_fn(dev, offset_i);
+ if (result) {
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ dev->n_free_chunks +=
+ dev->param.chunks_per_block;
+ } else {
+ dev->drv.drv_mark_bad_fn(dev, offset_i);
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+ int i;
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
+ dev->n_erased_blocks, dev->param.n_reserved_blocks,
+ blocks_avail, dev->checkpt_next_block);
+
+ if (dev->checkpt_next_block >= 0 &&
+ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocks_avail > 0) {
+
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ struct yaffs_block_info *bi;
+
+ bi = yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpt_next_block = i + 1;
+ dev->checkpt_cur_block = i;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block %d", i);
+ return;
+ }
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_ext_tags tags;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: start: blocks %d next %d",
+ dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ int chunk = i * dev->param.chunks_per_block;
+ enum yaffs_block_state state;
+ u32 seq;
+
+ dev->tagger.read_chunk_tags_fn(dev,
+ apply_chunk_offset(dev, chunk),
+ NULL, &tags);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: search: block %d oid %d seq %d eccr %d",
+ i, tags.obj_id, tags.seq_number,
+ tags.ecc_result);
+
+ if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ continue;
+
+ dev->tagger.query_block_fn(dev,
+ apply_block_offset(dev, i),
+ &state, &seq);
+ if (state == YAFFS_BLOCK_STATE_DEAD)
+ continue;
+
+ /* Right kind of block */
+ dev->checkpt_next_block = tags.obj_id;
+ dev->checkpt_cur_block = i;
+ dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
+ dev->blocks_in_checkpt++;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "found checkpt block %d", i);
+ return;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+ int i;
+
+ dev->checkpt_open_write = writing;
+
+ /* Got the functions we need? */
+ if (!dev->tagger.write_chunk_tags_fn ||
+ !dev->tagger.read_chunk_tags_fn ||
+ !dev->drv.drv_erase_fn ||
+ !dev->drv.drv_mark_bad_fn)
+ return 0;
+
+ if (writing && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+ if (!dev->checkpt_buffer)
+ dev->checkpt_buffer =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ dev->checkpt_page_seq = 0;
+ dev->checkpt_byte_count = 0;
+ dev->checkpt_sum = 0;
+ dev->checkpt_xor = 0;
+ dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_chunk = -1;
+ dev->checkpt_next_block = dev->internal_start_block;
+
+ if (writing) {
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+ yaffs2_checkpt_init_chunk_hdr(dev);
+ return yaffs_checkpt_erase(dev);
+ }
+
+ /* Opening for a read */
+ /* Set to a value that will kick off a read */
+ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 blocks is
+ * (hopefully) going to be way more than we need */
+ dev->blocks_in_checkpt = 0;
+ dev->checkpt_max_blocks =
+ (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
+ dev->checkpt_block_list =
+ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+
+ if (!dev->checkpt_block_list)
+ return 0;
+
+ for (i = 0; i < dev->checkpt_max_blocks; i++)
+ dev->checkpt_block_list[i] = -1;
+
+ return 1;
+}
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum)
+{
+ u32 composite_sum;
+
+ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
+ *sum = composite_sum;
+ return 1;
+}
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+ int chunk;
+ int offset_chunk;
+ struct yaffs_ext_tags tags;
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_erased_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+ tags.is_deleted = 0;
+ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
+ tags.chunk_id = dev->checkpt_page_seq + 1;
+ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.n_bytes = dev->data_bytes_per_chunk;
+ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->checkpt_cur_block);
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocks_in_checkpt++;
+ }
+
+ chunk =
+ dev->checkpt_cur_block * dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint wite buffer nand %d(%d:%d) objid %d chId %d",
+ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+ tags.obj_id, tags.chunk_id);
+
+ offset_chunk = apply_chunk_offset(dev, chunk);
+
+ dev->n_page_writes++;
+
+ dev->tagger.write_chunk_tags_fn(dev, offset_chunk,
+ dev->checkpt_buffer, &tags);
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+ dev->checkpt_cur_chunk = 0;
+ dev->checkpt_cur_block = -1;
+ }
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ yaffs2_checkpt_init_chunk_hdr(dev);
+
+
+ return 1;
+}
+
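+/*
+ * Stream bytes into the checkpoint.  Returns the number of bytes written,
+ * 0 if no checkpoint buffer is available, or -1 if the checkpoint was not
+ * opened for writing.
+ */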
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (!dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+}
+
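+/*
+ * Stream bytes out of the checkpoint.  Returns the number of bytes read,
+ * which may be short if the end of the checkpoint data is reached or a
+ * corrupt chunk is encountered.
+ */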
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ struct yaffs_ext_tags tags;
+ int chunk;
+ int offset_chunk;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0) {
+ ok = 0;
+ break;
+ }
+
+ chunk = dev->checkpt_cur_block *
+ dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ offset_chunk = apply_chunk_offset(dev, chunk);
+ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ dev->tagger.read_chunk_tags_fn(dev,
+ offset_chunk,
+ dev->checkpt_buffer,
+ &tags);
+
+ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ ok = 0;
+ break;
+ }
+ if (!yaffs2_checkpt_check_chunk_hdr(dev)) {
+ ok = 0;
+ break;
+ }
+
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+
+ if (dev->checkpt_cur_chunk >=
+ dev->param.chunks_per_block)
+ dev->checkpt_cur_block = -1;
+
+ }
+
+ *data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+ }
+
+ return i;
+}
+
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->checkpt_open_write) {
+ if (dev->checkpt_byte_offs !=
+ sizeof(struct yaffs_checkpt_chunk_hdr))
+ yaffs2_checkpt_flush_buffer(dev);
+ } else if (dev->checkpt_block_list) {
+ for (i = 0;
+ i < dev->blocks_in_checkpt &&
+ dev->checkpt_block_list[i] >= 0; i++) {
+ int blk = dev->checkpt_block_list[i];
+ struct yaffs_block_info *bi = NULL;
+
+ if (dev->internal_start_block <= blk &&
+ blk <= dev->internal_end_block)
+ bi = yaffs_get_block_info(dev, blk);
+ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ }
+ kfree(dev->checkpt_block_list);
+ dev->checkpt_block_list = NULL;
+ }
+
+ dev->n_free_chunks -=
+ dev->blocks_in_checkpt * dev->param.chunks_per_block;
+ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
+ dev->checkpt_byte_count);
+
+ if (dev->checkpt_buffer) {
+ /* free the buffer */
+ kfree(dev->checkpt_buffer);
+ dev->checkpt_buffer = NULL;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+ /* Erase the checkpoint data */
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint invalidate of %d blocks",
+ dev->blocks_in_checkpt);
+
+ return yaffs_checkpt_erase(dev);
+}
diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 000000000000..cdbaba7153ed
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 000000000000..9294107c150d
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,281 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
+ * such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
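+/*
+ * The 22 bits are 6 column-parity bits (covering the bit positions within
+ * each byte) plus 8 line-parity bits and their 8 complements (covering the
+ * byte addresses 0..255).
+ */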
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits 7..2.
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity,
+ * and therefore this byte's influence on the line parity.
+ */
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
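+/*
+ * Note: 256 bytes of 0xff (erased flash) produce an ECC of 0xff 0xff 0xff,
+ * so freshly erased chunks read back as ECC-clean without special casing.
+ */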
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+}
+
+/* Correct the ECC on a 256 byte block of data */
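+/* Returns 0 for no error, 1 for a corrected error, -1 if unrecoverable. */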
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+ /* Recoverable error in ECC */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc_other)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+ ecc_other->line_parity = line_parity;
+ ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc)
+{
+ unsigned char delta_col; /* column parity delta */
+ unsigned delta_line; /* line parity delta */
+ unsigned delta_line_prime; /* line parity delta */
+ unsigned bit;
+
+ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+ delta_line_prime =
+ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+ if ((delta_col | delta_line | delta_line_prime) == 0)
+ return 0; /* no error */
+
+ if (delta_line == ~delta_line_prime &&
+ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (delta_col & 0x20)
+ bit |= 0x04;
+ if (delta_col & 0x08)
+ bit |= 0x02;
+ if (delta_col & 0x02)
+ bit |= 0x01;
+
+ if (delta_line >= n_bytes)
+ return -1;
+
+ data[delta_line] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((hweight32(delta_line) +
+ hweight32(delta_line_prime) +
+ hweight8(delta_col)) == 1) {
+ /* Recoverable error in ECC */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+}
diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 000000000000..17d47bd80f3e
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data.
+ * Thus, two such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+ unsigned char col_parity;
+ unsigned line_parity;
+ unsigned line_parity_prime;
+};
+
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100644
index 000000000000..8fd0802bdddb
--- /dev/null
+++ b/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+ *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs: get_block_info block %d is not valid",
+ blk);
+ BUG();
+ }
+ return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 000000000000..1fd464d68af7
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5082 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsmarshall.h"
+#include "yaffs_nand.h"
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve);
+
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+ int buffer_size);
+
+/* Function to calculate chunk and offset */
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out)
+{
+ int chunk;
+ u32 offset;
+
+ chunk = (u32) (addr >> dev->chunk_shift);
+
+ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+ offset = (u32) (addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunk_base;
+
+ chunk /= dev->chunk_div;
+
+ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+ offset = (u32) (addr - chunk_base);
+ }
+
+ *chunk_out = chunk;
+ *offset_out = offset;
+}
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static inline u32 calc_shifts_ceiling(u32 x)
+{
+ int extra_bits;
+ int shifts;
+
+ shifts = extra_bits = 0;
+
+ while (x > 1) {
+ if (x & 1)
+ extra_bits++;
+ x >>= 1;
+ shifts++;
+ }
+
+ if (extra_bits)
+ shifts++;
+
+ return shifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static inline u32 calc_shifts(u32 x)
+{
+ u32 shifts;
+
+ shifts = 0;
+
+ if (!x)
+ return 0;
+
+ while (!(x & 1)) {
+ x >>= 1;
+ shifts++;
+ }
+
+ return shifts;
+}
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+ int i;
+ u8 *buf = (u8 *) 1;
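+ /* buf is primed non-NULL above so the loop runs; it goes NULL if an allocation fails */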
+
+ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->temp_buffer[i].in_use = 0;
+ buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ dev->temp_buffer[i].buffer = buf;
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
+{
+ int i;
+
+ dev->temp_in_use++;
+ if (dev->temp_in_use > dev->max_temp)
+ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].in_use == 0) {
+ dev->temp_buffer[i].in_use = 1;
+ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
+ /*
+ * If we got here then we have to allocate an unmanaged one
+ * This is not good.
+ */
+
+ dev->unmanaged_buffer_allocs++;
+ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
+{
+ int i;
+
+ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer) {
+ dev->temp_buffer[i].in_use = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Releasing unmanaged temp buffer");
+ kfree(buffer);
+ dev->unmanaged_buffer_deallocs++;
+ }
+
+}
+
+/*
+ * Functions for making the file system more robust (TODO)
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ (void) dev;
+ (void) nand_chunk;
+ (void) data;
+ (void) tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+ const struct yaffs_ext_tags *tags)
+{
+ (void) dev;
+ (void) nand_chunk;
+ (void) tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+ if (!bi->gc_prioritise) {
+ bi->gc_prioritise = 1;
+ dev->has_pending_prioritised_gc = 1;
+ bi->chunk_error_strikes++;
+
+ if (bi->chunk_error_strikes > 3) {
+ bi->needs_retiring = 1; /* Too many strikes, so retire */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Block struck out");
+
+ }
+ }
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+ int erased_ok)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs_handle_chunk_error(dev, bi);
+
+ if (erased_ok) {
+ /* Was an actual write failure,
+ * so mark the block for retirement.*/
+ bi->needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d needs retiring", flash_block);
+ }
+
+ /* Delete the chunk */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+ if (n < 0)
+ n = -n;
+ return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+ return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+ return dev->lost_n_found;
+}
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 *buffer, int n_bytes)
+{
+ /* Horrible, slow implementation */
+ while (n_bytes--) {
+ if (*buffer != 0xff)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+ int retval = YAFFS_OK;
+ u8 *data = yaffs_get_temp_buffer(dev);
+ struct yaffs_ext_tags tags;
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+ tags.chunk_used) {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not erased", nand_chunk);
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_release_temp_buffer(dev, data);
+
+ return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = YAFFS_OK;
+ struct yaffs_ext_tags temp_tags;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+ temp_tags.obj_id != tags->obj_id ||
+ temp_tags.chunk_id != tags->chunk_id ||
+ temp_tags.n_bytes != tags->n_bytes)
+ retval = YAFFS_FAIL;
+
+ yaffs_release_temp_buffer(dev, buffer);
+
+ return retval;
+}
+
+
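+/*
+ * Space reservation check: an allocation of n_chunks is only allowed if
+ * it leaves enough free chunks for the reserved blocks plus whatever the
+ * checkpoint is expected to need, i.e.
+ *
+ *   n_free_chunks > (n_reserved_blocks + checkpt_blocks) * chunks_per_block
+ *                   + n_chunks
+ */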
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+ int reserved_chunks;
+ int reserved_blocks = dev->param.n_reserved_blocks;
+ int checkpt_blocks;
+
+ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+ reserved_chunks =
+ (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
+
+ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
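+/*
+ * yaffs_find_alloc_block() picks the next block to allocate from.
+ * alloc_block_finder rotates through the internal block range so that
+ * allocation cycles around the device instead of always restarting the
+ * search at the first block; an EMPTY block is claimed, given the next
+ * sequence number and switched to the ALLOCATING state.
+ */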
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_block_info *bi;
+
+ if (dev->n_erased_blocks < 1) {
+ /* Hoosterman, we've got a problem.
+ * Can't get space to gc
+ */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no more erased blocks");
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ dev->alloc_block_finder++;
+ if (dev->alloc_block_finder < dev->internal_start_block
+ || dev->alloc_block_finder > dev->internal_end_block) {
+ dev->alloc_block_finder = dev->internal_start_block;
+ }
+
+ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->seq_number++;
+ bi->seq_number = dev->seq_number;
+ dev->n_erased_blocks--;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Allocated block %d, seq %d, %d left" ,
+ dev->alloc_block_finder, dev->seq_number,
+ dev->n_erased_blocks);
+ return dev->alloc_block_finder;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs tragedy: no more erased blocks, but there should have been %d",
+ dev->n_erased_blocks);
+
+ return -1;
+}
+
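+/*
+ * yaffs_alloc_chunk() returns a NAND chunk number computed as
+ *
+ *   chunk = alloc_block * chunks_per_block + alloc_page
+ *
+ * and advances alloc_page. When the block fills up it is marked FULL and
+ * alloc_block is set to -1 so the next call picks a fresh block.
+ * use_reserver permits dipping into the reserved space (e.g. during gc).
+ */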
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+ struct yaffs_block_info **block_ptr)
+{
+ int ret_val;
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block < 0) {
+ /* Get next block to allocate off */
+ dev->alloc_block = yaffs_find_alloc_block(dev);
+ dev->alloc_page = 0;
+ }
+
+ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+ /* No space unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ && dev->alloc_page == 0)
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+ /* Next page please.... */
+ if (dev->alloc_block >= 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+ dev->alloc_page;
+ bi->pages_in_use++;
+ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+ dev->alloc_page++;
+
+ dev->n_free_chunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->alloc_page >= dev->param.chunks_per_block) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ if (block_ptr)
+ *block_ptr = bi;
+
+ return ret_val;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
+
+ return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+ int n;
+
+ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ if (dev->alloc_block > 0)
+ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+ return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block > 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+ }
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+ const u8 *data,
+ struct yaffs_ext_tags *tags, int use_reserver)
+{
+ int attempts = 0;
+ int write_ok = 0;
+ int chunk;
+
+ yaffs2_checkpt_invalidate(dev);
+
+ do {
+ struct yaffs_block_info *bi = 0;
+ int erased_ok = 0;
+
+ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
+ */
+
+ /* let's give it a try */
+ attempts++;
+
+ if (dev->param.always_check_erased)
+ bi->skip_erased_check = 0;
+
+ if (!bi->skip_erased_check) {
+ erased_ok = yaffs_check_chunk_erased(dev, chunk);
+ if (erased_ok != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d was not erased",
+ chunk);
+
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+ continue;
+ }
+ }
+
+ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+ if (!bi->skip_erased_check)
+ write_ok =
+ yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+ if (write_ok != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
+ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+ continue;
+ }
+
+ bi->skip_erased_check = 1;
+
+ /* Copy the data into the robustification buffer */
+ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+ } while (write_ok != YAFFS_OK &&
+ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+ if (!write_ok)
+ chunk = -1;
+
+ if (attempts > 1) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs write required %d attempts",
+ attempts);
+ dev->n_retried_writes += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to mark bad and erase block %d",
+ flash_block);
+ } else {
+ struct yaffs_ext_tags tags;
+ int chunk_id =
+ flash_block * dev->param.chunks_per_block;
+
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ memset(&tags, 0, sizeof(tags));
+ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+ if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
+ dev->chunk_offset,
+ buffer,
+ &tags) != YAFFS_OK)
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to write bad block marker to block %d",
+ flash_block);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+ }
+
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ bi->gc_prioritise = 0;
+ bi->needs_retiring = 0;
+
+ dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
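+/*
+ * yaffs_calc_name_sum() computes a cheap, case-insensitive checksum of a
+ * name. The sum is stored in the object (obj->sum) so that directory
+ * name searches can skip entries whose sum does not match before doing a
+ * full string compare.
+ */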
+static u16 yaffs_calc_name_sum(const YCHAR *name)
+{
+ u16 sum = 0;
+ u16 i = 1;
+
+ if (!name)
+ return 0;
+
+ while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
+
+ /* 0x1f mask is case insensitive */
+ sum += ((*name) & 0x1f) * i;
+ i++;
+ name++;
+ }
+ return sum;
+}
+
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+ memset(obj->short_name, 0, sizeof(obj->short_name));
+
+ if (name && !name[0]) {
+ yaffs_fix_null_name(obj, obj->short_name,
+ YAFFS_SHORT_NAME_LENGTH);
+ name = obj->short_name;
+ } else if (name &&
+ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+ YAFFS_SHORT_NAME_LENGTH) {
+ strcpy(obj->short_name, name);
+ }
+
+ obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+ memset(tmp_name, 0, sizeof(tmp_name));
+ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_set_obj_name(obj, tmp_name);
+#else
+ yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+loff_t yaffs_max_file_size(struct yaffs_dev *dev)
+{
+ if (sizeof(loff_t) < 8)
+ return YAFFS_MAX_FILE_SIZE_32;
+ else
+ return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
+}
+
+/*-------------------- TNODES -------------------
+ *
+ * List of spare tnodes.
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+
+ if (tn) {
+ memset(tn, 0, dev->tnode_size);
+ dev->n_tnodes++;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ yaffs_free_raw_tnode(dev, tn);
+ dev->n_tnodes--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ yaffs_deinit_raw_tnodes_and_objs(dev);
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+}
+
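+/*
+ * Level 0 tnodes do not hold an array of 32-bit chunk ids; each entry is
+ * a tnode_width-bit field packed into the tnode's 32-bit words, and
+ * yaffs_load_tnode_0()/yaffs_get_group_base() below do the packing and
+ * unpacking, including the case where a field straddles a word boundary.
+ *
+ * Illustration (hypothetical numbers): with tnode_width == 20, entry
+ * pos == 3 starts at bit 60, so bits 28..31 of word 1 hold the low 4
+ * bits of the value and bits 0..15 of word 2 hold the upper 16 bits.
+ */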
+static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos, unsigned val)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunk_grp_bits;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ mask = dev->tnode_mask << bit_in_word;
+
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val << bit_in_word));
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ mask =
+ dev->tnode_mask >> bit_in_word;
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val >> bit_in_word));
+ }
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ val = map[word_in_map] >> bit_in_word;
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ val |= (map[word_in_map] << bit_in_word);
+ }
+
+ val &= dev->tnode_mask;
+ val <<= dev->chunk_grp_bits;
+
+ return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
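+/*
+ * required_depth below is the number of internal levels needed to reach
+ * a given chunk_id. As a rough illustration, assuming the usual 16-entry
+ * level 0 tnodes (YAFFS_TNODES_LEVEL0_BITS == 4) and 8-way internal
+ * tnodes (YAFFS_TNODES_INTERNAL_BITS == 3): chunk ids 0..15 need only a
+ * level 0 tnode, 16..127 need one internal level, 128..1023 need two,
+ * and so on.
+ */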
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id)
+{
+ struct yaffs_tnode *tn = file_struct->top;
+ u32 i;
+ int required_depth;
+ int level = file_struct->top_level;
+
+ (void) dev;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level)
+ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) *
+ YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+ }
+
+ return tn;
+}
+
+/* add_find_tnode_0 finds the level 0 tnode if it exists,
+ * otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL, then a fresh tnode will be added otherwise the
+ * specified tn will be plugged into the tree.
+ */
+
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn)
+{
+ int required_depth;
+ int i;
+ int l;
+ struct yaffs_tnode *tn;
+ u32 x;
+
+ /* Check sane level and page Id */
+ if (file_struct->top_level < 0 ||
+ file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level) {
+ /* Not tall enough, gotta make the tree taller */
+ for (i = file_struct->top_level; i < required_depth; i++) {
+
+ tn = yaffs_get_tnode(dev);
+
+ if (tn) {
+ tn->internal[0] = file_struct->top;
+ file_struct->top = tn;
+ file_struct->top_level++;
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: no more tnodes");
+ return NULL;
+ }
+ }
+ }
+
+ /* Traverse down to level 0, adding anything we need */
+
+ l = file_struct->top_level;
+ tn = file_struct->top;
+
+ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+ if (passed_tn) {
+ /* If we already have one, release it */
+ if (tn->internal[x])
+ yaffs_free_tnode(dev,
+ tn->internal[x]);
+ tn->internal[x] = passed_tn;
+
+ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ }
+ }
+
+ tn = tn->internal[x];
+ l--;
+ }
+ } else {
+ /* top is level 0 */
+ if (passed_tn) {
+ memcpy(tn, passed_tn,
+ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+ yaffs_free_tnode(dev, passed_tn);
+ }
+ }
+
+ return tn;
+}
+
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+ int chunk_obj)
+{
+ return (tags->chunk_id == chunk_obj &&
+ tags->obj_id == obj_id &&
+ !tags->is_deleted) ? 1 : 0;
+
+}
+
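+/*
+ * When the tnode field is too narrow to hold a full chunk id,
+ * chunk_grp_bits/chunk_grp_size come into play and each level 0 entry
+ * identifies a group of chunks rather than a single chunk.
+ * yaffs_find_chunk_in_group() walks the group, using the chunk bitmap
+ * and, if the group size is greater than one, reading tags to find the
+ * chunk that actually belongs to the given object and inode chunk.
+ */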
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+ struct yaffs_ext_tags *tags, int obj_id,
+ int inode_chunk)
+{
+ int j;
+
+ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+ if (yaffs_check_chunk_bit
+ (dev, the_chunk / dev->param.chunks_per_block,
+ the_chunk % dev->param.chunks_per_block)) {
+
+ if (dev->chunk_grp_size == 1)
+ return the_chunk;
+ else {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ tags);
+ if (yaffs_tags_match(tags,
+ obj_id, inode_chunk)) {
+ /* found it; */
+ return the_chunk;
+ }
+ }
+ }
+ the_chunk++;
+ }
+ return -1;
+}
+
+int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+ /* Get the tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ int ret_val = -1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (!tn)
+ return ret_val;
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+ return ret_val;
+}
+
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+ /* Get the tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ struct yaffs_dev *dev = in->my_dev;
+ int ret_val = -1;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (!tn)
+ return ret_val;
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+
+ /* Delete the entry in the filestructure (if found) */
+ if (ret_val != -1)
+ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+
+ return ret_val;
+}
+
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan)
+{
+ /* NB in_scan is zero unless scanning.
+ * For forward scanning, in_scan is > 0;
+ * for backward scanning in_scan is < 0
+ *
+ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+ */
+
+ struct yaffs_tnode *tn;
+ struct yaffs_dev *dev = in->my_dev;
+ int existing_cunk;
+ struct yaffs_ext_tags existing_tags;
+ struct yaffs_ext_tags new_tags;
+ unsigned existing_serial, new_serial;
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+ /* Just ignore an attempt at putting a chunk into a non-file
+ * during scanning.
+ * If it is not during Scanning then something went wrong!
+ */
+ if (!in_scan) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy:attempt to put data chunk into a non-file"
+ );
+ BUG();
+ }
+
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+
+ tn = yaffs_add_find_tnode_0(dev,
+ &in->variant.file_variant,
+ inode_chunk, NULL);
+ if (!tn)
+ return YAFFS_FAIL;
+
+ if (!nand_chunk)
+ /* Dummy insert, bail now */
+ return YAFFS_OK;
+
+ existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ if (in_scan != 0) {
+ /* If we're scanning then we need to test for duplicates
+ * NB This does not need to be efficient since it should only
+ * happen when the power fails during a write, then only one
+ * chunk should ever be affected.
+ *
+ * Correction for YAFFS2: This could happen quite a lot and we
+ * need to think about efficiency! TODO
+ * Update: For backward scanning we don't need to re-read tags
+ * so this is quite cheap.
+ */
+
+ if (existing_cunk > 0) {
+ /* NB Right now existing chunk will not be real
+ * chunk_id if the chunk group size > 1
+ * thus we have to do a FindChunkInFile to get the
+ * real chunk id.
+ *
+ * We have a duplicate now we need to decide which
+ * one to use:
+ *
+ * Backwards scanning YAFFS2: The old one is what
+ * we use, dump the new one.
+ * YAFFS1: Get both sets of tags and compare serial
+ * numbers.
+ */
+
+ if (in_scan > 0) {
+ /* Only do this for forward scanning */
+ yaffs_rd_chunk_tags_nand(dev,
+ nand_chunk,
+ NULL, &new_tags);
+
+ /* Do a proper find */
+ existing_cunk =
+ yaffs_find_chunk_in_file(in, inode_chunk,
+ &existing_tags);
+ }
+
+ if (existing_cunk <= 0) {
+ /* Hoosterman - how did this happen? */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: existing chunk < 0 in scan"
+ );
+
+ }
+
+ /* NB The deleted flags should be false, otherwise
+ * the chunks will not be loaded during a scan
+ */
+
+ if (in_scan > 0) {
+ new_serial = new_tags.serial_number;
+ existing_serial = existing_tags.serial_number;
+ }
+
+ if ((in_scan > 0) &&
+ (existing_cunk <= 0 ||
+ ((existing_serial + 1) & 3) == new_serial)) {
+ /* Forward scanning.
+ * Use new
+ * Delete the old one and drop through to
+ * update the tnode
+ */
+ yaffs_chunk_del(dev, existing_cunk, 1,
+ __LINE__);
+ } else {
+ /* Backward scanning or we want to use the
+ * existing one
+ * Delete the new one and return early so that
+ * the tnode isn't changed
+ */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+ }
+
+ }
+
+ if (existing_cunk == 0)
+ in->n_data_chunks++;
+
+ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+ return YAFFS_OK;
+}
+
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+ struct yaffs_block_info *the_block;
+ unsigned block_no;
+
+ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+ block_no = chunk / dev->param.chunks_per_block;
+ the_block = yaffs_get_block_info(dev, block_no);
+ if (the_block) {
+ the_block->soft_del_pages++;
+ dev->n_free_chunks++;
+ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+ }
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
+ * the chunks in the file.
+ * All soft deleting does is increment the block's soft-delete count and pull
+ * the chunk out of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks
+ * are soft deleted.
+ */
+
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+ u32 level, int chunk_offset)
+{
+ int i;
+ int the_chunk;
+ int all_done = 1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tn)
+ return 1;
+
+ if (level > 0) {
+ for (i = YAFFS_NTNODES_INTERNAL - 1;
+ all_done && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ all_done =
+ yaffs_soft_del_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS)
+ + i);
+ if (all_done) {
+ yaffs_free_tnode(dev,
+ tn->internal[i]);
+ tn->internal[i] = NULL;
+ } else {
+ /* Can this happen? */
+ }
+ }
+ }
+ return (all_done) ? 1 : 0;
+ }
+
+ /* level 0 */
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk) {
+ yaffs_soft_del_chunk(dev, the_chunk);
+ yaffs_load_tnode_0(dev, tn, i, 0);
+ }
+ }
+ return 1;
+}
+
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_obj *parent;
+
+ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+ yaffs_verify_dir(parent);
+
+ if (dev && dev->param.remove_obj_fn)
+ dev->param.remove_obj_fn(obj);
+
+ list_del_init(&obj->siblings);
+ obj->parent = NULL;
+
+ yaffs_verify_dir(parent);
+}
+
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a null pointer directory"
+ );
+ BUG();
+ return;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a non-directory"
+ );
+ BUG();
+ }
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+ BUG();
+ }
+
+ yaffs_verify_dir(directory);
+
+ yaffs_remove_obj_from_dir(obj);
+
+ /* Now add it */
+ list_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+ if (directory == obj->my_dev->unlinked_dir
+ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+ obj->my_dev->n_unlinked_files++;
+ obj->rename_allowed = 0;
+ }
+
+ yaffs_verify_dir(directory);
+ yaffs_verify_obj_in_dir(obj);
+}
+
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+ struct yaffs_obj *new_dir,
+ const YCHAR *new_name, int force, int shadows)
+{
+ int unlink_op;
+ int del_op;
+ struct yaffs_obj *existing_target;
+
+ if (new_dir == NULL)
+ new_dir = obj->parent; /* use the old directory */
+
+ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
+ );
+ BUG();
+ }
+
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+ del_op = (new_dir == obj->my_dev->del_dir);
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+
+ /* If the object is a file going into the unlinked directory,
+ * then it is OK to just stuff it in since duplicate names are OK.
+ * else only proceed if the new name does not exist and we're putting
+ * it into a directory.
+ */
+ if (!(unlink_op || del_op || force ||
+ shadows > 0 || !existing_target) ||
+ new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ return YAFFS_FAIL;
+
+ yaffs_set_obj_name(obj, new_name);
+ obj->dirty = 1;
+ yaffs_add_obj_to_dir(new_dir, obj);
+
+ if (unlink_op)
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc */
+ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ------------------------------
+ * In many situations where there is no high level buffering a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes. eg. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance
+ * benefit with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache
+ * provides write buffering.
+ *
+ * There are a small number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
+
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ struct yaffs_cache *cache;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ for (i = 0; i < n_caches; i++) {
+ cache = &dev->cache[i];
+ if (cache->object == obj && cache->dirty)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int lowest = -99; /* Stop compiler whining. */
+ int i;
+ struct yaffs_cache *cache;
+ int chunk_written = 0;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ if (n_caches < 1)
+ return;
+ do {
+ cache = NULL;
+
+ /* Find the lowest dirty chunk for this object */
+ for (i = 0; i < n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].dirty) {
+ if (!cache ||
+ dev->cache[i].chunk_id < lowest) {
+ cache = &dev->cache[i];
+ lowest = cache->chunk_id;
+ }
+ }
+ }
+
+ if (cache && !cache->locked) {
+ /* Write it out and free it up */
+ chunk_written =
+ yaffs_wr_data_obj(cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ cache->object = NULL;
+ }
+ } while (cache && chunk_written > 0);
+
+ if (cache)
+ /* Hoosterman, disk full while writing cache out. */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no space during cache write");
+}
+
+/*
+ * yaffs_flush_whole_cache() writes out every dirty cache entry on the
+ * device by repeatedly finding an object with dirty cache pages and
+ * flushing its file cache until none remain.
+ */
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int n_caches = dev->param.n_caches;
+ int i;
+
+ /* Find a dirty object in the cache and flush it...
+ * until there are no further dirty objects.
+ */
+ do {
+ obj = NULL;
+ for (i = 0; i < n_caches && !obj; i++) {
+ if (dev->cache[i].object && dev->cache[i].dirty)
+ obj = dev->cache[i].object;
+ }
+ if (obj)
+ yaffs_flush_file_cache(obj);
+ } while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (!dev->cache[i].object)
+ return &dev->cache[i];
+ }
+ }
+ return NULL;
+}
+
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_cache *cache;
+ struct yaffs_obj *the_obj;
+ int usage;
+ int i;
+ int pushout;
+
+ if (dev->param.n_caches < 1)
+ return NULL;
+
+ /* Try to find a non-dirty one... */
+
+ cache = yaffs_grab_chunk_worker(dev);
+
+ if (!cache) {
+ /* They were all dirty, find the LRU object and flush
+ * its cache, then find again.
+ * NB what's here is not very accurate,
+ * we actually flush the object with the LRU chunk.
+ */
+
+ /* With locking we can't assume we can use entry zero,
+ * Set the_obj to a valid pointer for Coverity. */
+ the_obj = dev->cache[0].object;
+ usage = -1;
+ cache = NULL;
+ pushout = -1;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object &&
+ !dev->cache[i].locked &&
+ (dev->cache[i].last_use < usage ||
+ !cache)) {
+ usage = dev->cache[i].last_use;
+ the_obj = dev->cache[i].object;
+ cache = &dev->cache[i];
+ pushout = i;
+ }
+ }
+
+ if (!cache || cache->dirty) {
+ /* Flush and try again */
+ yaffs_flush_file_cache(the_obj);
+ cache = yaffs_grab_chunk_worker(dev);
+ }
+ }
+ return cache;
+}
+
+/* Find a cached chunk */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+ int chunk_id)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return NULL;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].chunk_id == chunk_id) {
+ dev->cache_hits++;
+
+ return &dev->cache[i];
+ }
+ }
+ return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+ int is_write)
+{
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return;
+
+ if (dev->cache_last_use < 0 ||
+ dev->cache_last_use > 100000000) {
+ /* Reset the cache usages */
+ for (i = 1; i < dev->param.n_caches; i++)
+ dev->cache[i].last_use = 0;
+
+ dev->cache_last_use = 0;
+ }
+ dev->cache_last_use++;
+ cache->last_use = dev->cache_last_use;
+
+ if (is_write)
+ cache->dirty = 1;
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+ struct yaffs_cache *cache;
+
+ if (object->my_dev->param.n_caches > 0) {
+ cache = yaffs_find_chunk_cache(object, chunk_id);
+
+ if (cache)
+ cache->object = NULL;
+ }
+}
+
+/* Invalidate all the cache pages associated with this object
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.n_caches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == in)
+ dev->cache[i].object = NULL;
+ }
+ }
+}
+
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+ int bucket;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ /* If it is still linked into the bucket list, free from the list */
+ if (!list_empty(&obj->hash_link)) {
+ list_del_init(&obj->hash_link);
+ bucket = yaffs_hash_fn(obj->obj_id);
+ dev->obj_bucket[bucket].count--;
+ }
+}
+
+/* FreeObject frees up a Object and puts it back on the free list */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ if (!obj) {
+ BUG();
+ return;
+ }
+ dev = obj->my_dev;
+ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+ obj, obj->my_inode);
+ if (obj->parent)
+ BUG();
+ if (!list_empty(&obj->siblings))
+ BUG();
+
+ if (obj->my_inode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+ */
+ obj->defered_free = 1;
+ return;
+ }
+
+ yaffs_unhash_obj(obj);
+
+ yaffs_free_raw_obj(dev, obj);
+ dev->n_obj--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+ if (obj->defered_free)
+ yaffs_free_obj(obj);
+}
+
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+ /* Invalidate the file's data in the cache, without flushing. */
+ yaffs_invalidate_whole_cache(in);
+
+ if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
+ /* Move to unlinked directory so we have a deletion record */
+ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+ 0);
+ }
+
+ yaffs_remove_obj_from_dir(in);
+ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+ in->hdr_chunk = 0;
+
+ yaffs_free_obj(in);
+ return YAFFS_OK;
+
+}
+
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+ if (!obj->deleted ||
+ obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
+ obj->soft_del)
+ return;
+
+ if (obj->n_data_chunks <= 0) {
+ /* Empty file with no duplicate object headers,
+ * just delete it immediately */
+ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
+ obj->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: Deleting empty file %d",
+ obj->obj_id);
+ yaffs_generic_obj_del(obj);
+ } else {
+ yaffs_soft_del_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.
+ file_variant.top_level, 0);
+ obj->soft_del = 1;
+ }
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+ struct yaffs_tnode *tn, u32 level,
+ int del0)
+{
+ int i;
+ int has_data;
+
+ if (!tn)
+ return tn;
+
+ has_data = 0;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ tn->internal[i] =
+ yaffs_prune_worker(dev,
+ tn->internal[i],
+ level - 1,
+ (i == 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i])
+ has_data++;
+ }
+ } else {
+ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+ u32 *map = (u32 *) tn;
+
+ for (i = 0; !has_data && i < tnode_size_u32; i++) {
+ if (map[i])
+ has_data++;
+ }
+ }
+
+ if (has_data == 0 && del0) {
+ /* Free and return NULL */
+ yaffs_free_tnode(dev, tn);
+ tn = NULL;
+ }
+ return tn;
+}
+
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct)
+{
+ int i;
+ int has_data;
+ int done = 0;
+ struct yaffs_tnode *tn;
+
+ if (file_struct->top_level < 1)
+ return YAFFS_OK;
+
+ file_struct->top =
+ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
+
+ /* Now we have a tree in which every branch that held no data has been
+ * freed and set to NULL, but the height is unchanged.
+ * Let's see if we can trim internal tnodes to shorten the tree.
+ * We can do this if only the 0th element of the top tnode is in use
+ * (ie. all the other entries are NULL).
+ */
+
+ while (file_struct->top_level && !done) {
+ tn = file_struct->top;
+
+ has_data = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i])
+ has_data++;
+ }
+
+ if (!has_data) {
+ file_struct->top = tn->internal[0];
+ file_struct->top_level--;
+ yaffs_free_tnode(dev, tn);
+ } else {
+ done = 1;
+ }
+ }
+
+ return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* alloc_empty_obj gets us a clean Object.*/
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+ if (!obj)
+ return obj;
+
+ dev->n_obj++;
+
+ /* Now sweeten it up... */
+
+ memset(obj, 0, sizeof(struct yaffs_obj));
+ obj->being_created = 1;
+
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0;
+ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+ INIT_LIST_HEAD(&(obj->hard_links));
+ INIT_LIST_HEAD(&(obj->hash_link));
+ INIT_LIST_HEAD(&obj->siblings);
+
+ /* Now make the directory sane */
+ if (dev->root_dir) {
+ obj->parent = dev->root_dir;
+ list_add(&(obj->siblings),
+ &dev->root_dir->variant.dir_variant.children);
+ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lost-n-found in lost-n-found so
+ * check if lost-n-found exists first
+ */
+ if (dev->lost_n_found)
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+ obj->being_created = 0;
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return obj;
+}
+
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+ int i;
+ int l = 999;
+ int lowest = 999999;
+
+ /* Search for the shortest list or one that
+ * isn't too long.
+ */
+
+ for (i = 0; i < 10 && lowest > 4; i++) {
+ dev->bucket_finder++;
+ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+ lowest = dev->obj_bucket[dev->bucket_finder].count;
+ l = dev->bucket_finder;
+ }
+ }
+
+ return l;
+}
+
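+/*
+ * yaffs_new_obj_id() picks an id congruent to the chosen bucket number
+ * modulo YAFFS_NOBJECT_BUCKETS (it starts at the bucket number and steps
+ * by YAFFS_NOBJECT_BUCKETS), so yaffs_hash_fn() maps the new id straight
+ * back into the short bucket found by yaffs_find_nice_bucket(), and only
+ * that one bucket list has to be scanned for collisions.
+ */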
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+ int bucket = yaffs_find_nice_bucket(dev);
+ int found = 0;
+ struct list_head *i;
+ u32 n = (u32) bucket;
+
+ /* Now find an object value that has not already been taken
+ * by scanning the list.
+ */
+
+ while (!found) {
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->obj_bucket[bucket].count > 0) {
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* If there is already one in the list */
+ if (i && list_entry(i, struct yaffs_obj,
+ hash_link)->obj_id == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+ return n;
+}
+
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+ int bucket = yaffs_hash_fn(in->obj_id);
+ struct yaffs_dev *dev = in->my_dev;
+
+ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+ dev->obj_bucket[bucket].count++;
+}
+
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+ int bucket = yaffs_hash_fn(number);
+ struct list_head *i;
+ struct yaffs_obj *in;
+
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* Look if it is in the list */
+ in = list_entry(i, struct yaffs_obj, hash_link);
+ if (in->obj_id == number) {
+ /* Don't show if it is defered free */
+ if (in->defered_free)
+ return NULL;
+ return in;
+ }
+ }
+
+ return NULL;
+}
+
+static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+ struct yaffs_tnode *tn = NULL;
+
+ if (number < 0)
+ number = yaffs_new_obj_id(dev);
+
+ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_get_tnode(dev);
+ if (!tn)
+ return NULL;
+ }
+
+ the_obj = yaffs_alloc_empty_obj(dev);
+ if (!the_obj) {
+ if (tn)
+ yaffs_free_tnode(dev, tn);
+ return NULL;
+ }
+
+ the_obj->fake = 0;
+ the_obj->rename_allowed = 1;
+ the_obj->unlink_allowed = 1;
+ the_obj->obj_id = number;
+ yaffs_hash_obj(the_obj);
+ the_obj->variant_type = type;
+ yaffs_load_current_time(the_obj, 1, 1);
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ the_obj->variant.file_variant.file_size = 0;
+ the_obj->variant.file_variant.scanned_size = 0;
+ the_obj->variant.file_variant.shrink_size =
+ yaffs_max_file_size(dev);
+ the_obj->variant.file_variant.top_level = 0;
+ the_obj->variant.file_variant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* No action required */
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* todo this should not happen */
+ break;
+ }
+ return the_obj;
+}
+
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+ int number, u32 mode)
+{
+
+ struct yaffs_obj *obj =
+ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ if (!obj)
+ return NULL;
+
+ obj->fake = 1; /* it is fake so it might not use NAND */
+ obj->rename_allowed = 0;
+ obj->unlink_allowed = 0;
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0; /* Not a valid chunk. */
+ return obj;
+
+}
+
+
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ int i;
+
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+ yaffs_init_raw_tnodes_and_objs(dev);
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+ dev->obj_bucket[i].count = 0;
+ }
+}
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+
+ if (number > 0)
+ the_obj = yaffs_find_by_number(dev, number);
+
+ if (!the_obj)
+ the_obj = yaffs_new_obj(dev, number, type);
+
+ return the_obj;
+
+}
+
+YCHAR *yaffs_clone_str(const YCHAR *str)
+{
+ YCHAR *new_str = NULL;
+ int len;
+
+ if (!str)
+ str = _Y("");
+
+ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+ if (new_str) {
+ strncpy(new_str, str, len);
+ new_str[len] = 0;
+ }
+ return new_str;
+
+}
+/*
+ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
+ * new link (ie. name) is created or deleted in the directory.
+ *
+ * ie.
+ * create dir/a : update dir's mtime/ctime
+ * rm dir/a: update dir's mtime/ctime
+ * modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the
+ * number of updates when many files in a directory are changed within a
+ * brief period.
+ *
+ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
+
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ if (!obj)
+ return;
+ dev = obj->my_dev;
+ obj->dirty = 1;
+ yaffs_load_current_time(obj, 0, 1);
+ if (dev->param.defered_dir_update) {
+ struct list_head *link = &obj->variant.dir_variant.dirty;
+
+ if (list_empty(link)) {
+ list_add(link, &dev->dirty_dirs);
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Added object %d to dirty directories",
+ obj->obj_id);
+ }
+
+ } else {
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+ struct list_head *link;
+ struct yaffs_obj *obj;
+ struct yaffs_dir_var *d_s;
+ union yaffs_obj_var *o_v;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+ while (!list_empty(&dev->dirty_dirs)) {
+ link = dev->dirty_dirs.next;
+ list_del_init(link);
+
+ d_s = list_entry(link, struct yaffs_dir_var, dirty);
+ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+ obj = list_entry(o_v, struct yaffs_obj, variant);
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+ obj->obj_id);
+
+ if (obj->dirty)
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+ struct yaffs_obj *parent,
+ const YCHAR *name,
+ u32 mode,
+ u32 uid,
+ u32 gid,
+ struct yaffs_obj *equiv_obj,
+ const YCHAR *alias_str, u32 rdev)
+{
+ struct yaffs_obj *in;
+ YCHAR *str = NULL;
+ struct yaffs_dev *dev = parent->my_dev;
+
+ /* Check if the entry exists.
+ * If it does then fail the call since we don't want a dup. */
+ if (yaffs_find_by_name(parent, name))
+ return NULL;
+
+ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_clone_str(alias_str);
+ if (!str)
+ return NULL;
+ }
+
+ in = yaffs_new_obj(dev, -1, type);
+
+ if (!in) {
+ kfree(str);
+ return NULL;
+ }
+
+ in->hdr_chunk = 0;
+ in->valid = 1;
+ in->variant_type = type;
+
+ in->yst_mode = mode;
+
+ yaffs_attribs_init(in, gid, uid, rdev);
+
+ in->n_data_chunks = 0;
+
+ yaffs_set_obj_name(in, name);
+ in->dirty = 1;
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ in->my_dev = parent->my_dev;
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardlink_variant.equiv_obj = equiv_obj;
+ in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
+ list_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+ /* Could not create the object header, fail */
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ if (in)
+ yaffs_update_parent(parent);
+
+ return in;
+}
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+ uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+ u32 mode, u32 uid, u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, u32 rdev)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+}
+
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, const YCHAR *alias)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+ uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the object id of the equivalent object.*/
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj)
+{
+ /* Get the real object in case we were fed a hard link obj */
+ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+ if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
+ parent, name, 0, 0, 0,
+ equiv_obj, NULL, 0))
+ return equiv_obj;
+
+ return NULL;
+
+}
+
+
+
+/*---------------------- Block Management and Page Allocation -------------*/
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+ if (dev->block_info_alt && dev->block_info)
+ vfree(dev->block_info);
+ else
+ kfree(dev->block_info);
+
+ dev->block_info_alt = 0;
+
+ dev->block_info = NULL;
+
+ if (dev->chunk_bits_alt && dev->chunk_bits)
+ vfree(dev->chunk_bits);
+ else
+ kfree(dev->chunk_bits);
+ dev->chunk_bits_alt = 0;
+ dev->chunk_bits = NULL;
+}
+
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ dev->block_info = NULL;
+ dev->chunk_bits = NULL;
+ dev->alloc_block = -1; /* force it to get a new one */
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->block_info =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+ if (!dev->block_info) {
+ dev->block_info =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+ dev->block_info_alt = 1;
+ } else {
+ dev->block_info_alt = 0;
+ }
+
+ if (!dev->block_info)
+ goto alloc_error;
+
+ /* Set up dynamic blockinfo stuff. Round up bytes. */
+ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+ dev->chunk_bits =
+ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+ if (!dev->chunk_bits) {
+ dev->chunk_bits =
+ vmalloc(dev->chunk_bit_stride * n_blocks);
+ dev->chunk_bits_alt = 1;
+ } else {
+ dev->chunk_bits_alt = 0;
+ }
+ if (!dev->chunk_bits)
+ goto alloc_error;
+
+
+ memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
+ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+ return YAFFS_OK;
+
+alloc_error:
+ yaffs_deinit_blocks(dev);
+ return YAFFS_FAIL;
+}
+
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+ int erased_ok = 0;
+ int i;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ "yaffs_block_became_dirty block %d state %d %s",
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : "");
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc
+ * then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ yaffs2_checkpt_invalidate(dev);
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Erasure failed %d", block_no);
+ }
+ }
+
+ /* Verify erasure if needed */
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
+ !yaffs_skip_verification(dev))) {
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased(dev,
+ block_no * dev->param.chunks_per_block + i)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ ">>Block %d erasure supposedly OK, but chunk %d not erased",
+ block_no, i);
+ }
+ }
+ }
+
+ if (!erased_ok) {
+ /* We lost a block of free space */
+ dev->n_free_chunks -= dev->param.chunks_per_block;
+ yaffs_retire_block(dev, block_no);
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d retired", block_no);
+ return;
+ }
+
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* Clean, so no need to check */
+ bi->gc_prioritise = 0;
+ bi->has_summary = 0;
+
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
+}
+
+static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int old_chunk, u8 *buffer)
+{
+ int new_chunk;
+ int mark_flash = 1;
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj *object;
+ int matching_chunk;
+ int ret_val = YAFFS_OK;
+
+ memset(&tags, 0, sizeof(tags));
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+ "Collecting chunk in block %d, %d %d %d ",
+ dev->gc_chunk, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ if (object && !yaffs_skip_verification(dev)) {
+ if (tags.chunk_id == 0)
+ matching_chunk =
+ object->hdr_chunk;
+ else if (object->soft_del)
+ /* Defeat the test */
+ matching_chunk = old_chunk;
+ else
+ matching_chunk =
+ yaffs_find_chunk_in_file
+ (object, tags.chunk_id,
+ NULL);
+
+ if (old_chunk != matching_chunk)
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "gc: page in gc mismatch: %d %d %d %d",
+ old_chunk,
+ matching_chunk,
+ tags.obj_id,
+ tags.chunk_id);
+ }
+
+ if (!object) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "page %d in gc has no object: %d %d %d ",
+ old_chunk,
+ tags.obj_id, tags.chunk_id,
+ tags.n_bytes);
+ }
+
+ if (object &&
+ object->deleted &&
+ object->soft_del && tags.chunk_id != 0) {
+ /* Data chunk in a soft deleted file,
+ * throw it away.
+ * It's a soft deleted data chunk,
+ * No need to copy this, just forget
+ * about it and fix up the object.
+ */
+
+ /* The free chunk count already includes
+ * soft-deleted chunks; however, this
+ * chunk is about to be really deleted,
+ * which will increment the free count
+ * again. Decrement it here so the
+ * accounting works out properly.
+ */
+ dev->n_free_chunks--;
+ bi->soft_del_pages--;
+
+ object->n_data_chunks--;
+ if (object->n_data_chunks <= 0) {
+ /* remember to clean up this object */
+ dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
+ dev->n_clean_ups++;
+ }
+ mark_flash = 0;
+ } else if (object) {
+ /* It's either a data chunk in a live
+ * file or an ObjectHeader, so we're
+ * interested in it.
+ * NB Need to keep the ObjectHeaders of
+ * deleted files until the whole file
+ * has been deleted off
+ */
+ tags.serial_number++;
+ dev->n_gc_copies++;
+
+ if (tags.chunk_id == 0) {
+ /* It is an object Id,
+ * We need to nuke the
+ * shrinkheader flags since its
+ * work is done.
+ * Also need to clean up
+ * shadowing.
+ */
+ struct yaffs_obj_hdr *oh;
+ oh = (struct yaffs_obj_hdr *) buffer;
+
+ oh->is_shrink = 0;
+ tags.extra_is_shrink = 0;
+ oh->shadows_obj = 0;
+ oh->inband_shadowed_obj_id = 0;
+ tags.extra_shadows = 0;
+
+ /* Update file size */
+ if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ yaffs_oh_size_load(oh,
+ object->variant.file_variant.file_size);
+ tags.extra_file_size =
+ object->variant.file_variant.file_size;
+ }
+
+ yaffs_verify_oh(object, oh, &tags, 1);
+ new_chunk =
+ yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
+ } else {
+ new_chunk =
+ yaffs_write_new_chunk(dev, buffer, &tags, 1);
+ }
+
+ if (new_chunk < 0) {
+ ret_val = YAFFS_FAIL;
+ } else {
+
+ /* Now fix up the Tnodes etc. */
+
+ if (tags.chunk_id == 0) {
+ /* It's a header */
+ object->hdr_chunk = new_chunk;
+ object->serial = tags.serial_number;
+ } else {
+ /* It's a data chunk */
+ yaffs_put_chunk_in_file(object, tags.chunk_id,
+ new_chunk, 0);
+ }
+ }
+ }
+ if (ret_val == YAFFS_OK)
+ yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
+ return ret_val;
+}
+
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+ int old_chunk;
+ int ret_val = YAFFS_OK;
+ int i;
+ int is_checkpt_block;
+ int max_copies;
+ int chunks_before = yaffs_get_erased_chunks(dev);
+ int chunks_after;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d, in use %d, shrink %d, whole_block %d",
+ block, bi->pages_in_use, bi->has_shrink_hdr,
+ whole_block);
+
+ /*yaffs_verify_free_chunks(dev); */
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+ bi->has_shrink_hdr = 0; /* clear the flag so that the block can be erased */
+
+ dev->gc_disable = 1;
+
+ yaffs_summary_gc(dev, block);
+
+ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d that has no chunks in use",
+ block);
+ yaffs_block_became_dirty(dev, block);
+ } else {
+
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ yaffs_verify_blk(dev, bi, block);
+
+ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+ for (/* init already done */ ;
+ ret_val == YAFFS_OK &&
+ dev->gc_chunk < dev->param.chunks_per_block &&
+ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+ max_copies > 0;
+ dev->gc_chunk++, old_chunk++) {
+ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+ /* Page is in use and might need to be copied */
+ max_copies--;
+ ret_val = yaffs_gc_process_chunk(dev, bi,
+ old_chunk, buffer);
+ }
+ }
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+
+ yaffs_verify_collected_blk(dev, bi, block);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ /*
+ * The gc did not complete. Set block state back to FULL
+ * because checkpointing does not restore gc.
+ */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ } else {
+ /* The gc completed. */
+ /* Do any required cleanups */
+ for (i = 0; i < dev->n_clean_ups; i++) {
+ /* Time to delete the file too */
+ struct yaffs_obj *object =
+ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+ if (object) {
+ yaffs_free_tnode(dev,
+ object->variant.file_variant.top);
+ object->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: About to finally delete object %d",
+ object->obj_id);
+ yaffs_generic_obj_del(object);
+ object->my_dev->n_deleted_files--;
+ }
+
+ }
+ chunks_after = yaffs_get_erased_chunks(dev);
+ if (chunks_before >= chunks_after)
+ yaffs_trace(YAFFS_TRACE_GC,
+ "gc did not increase free chunks before %d after %d",
+ chunks_before, chunks_after);
+ dev->gc_block = 0;
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ dev->gc_disable = 0;
+
+ return ret_val;
+}
+
+/*
+ * find_gc_block() selects the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+ int aggressive, int background)
+{
+ int i;
+ int iterations;
+ unsigned selected = 0;
+ int prioritised = 0;
+ int prioritised_exist = 0;
+ struct yaffs_block_info *bi;
+ int threshold;
+
+ /* First let's see if we need to grab a prioritised block */
+ if (dev->has_pending_prioritised_gc && !aggressive) {
+ dev->gc_dirtiest = 0;
+ bi = dev->block_info;
+ for (i = dev->internal_start_block;
+ i <= dev->internal_end_block && !selected; i++) {
+
+ if (bi->gc_prioritise) {
+ prioritised_exist = 1;
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ selected = i;
+ prioritised = 1;
+ }
+ }
+ bi++;
+ }
+
+ /*
+ * If there is a prioritised block and none was selected then
+ * this happened because there is at least one old dirty block
+ * gumming up the works. Let's gc the oldest dirty block.
+ */
+
+ if (prioritised_exist &&
+ !selected && dev->oldest_dirty_block > 0)
+ selected = dev->oldest_dirty_block;
+
+ if (!prioritised_exist) /* None found, so we can clear this */
+ dev->has_pending_prioritised_gc = 0;
+ }
+
+	/* If we're doing aggressive GC then we are happy to take a less-dirty
+	 * block, and search harder.
+	 * Otherwise (leisurely gc) we only bother to do this if the
+	 * block has only a few pages in use.
+	 */
+
+ if (!selected) {
+ int pages_used;
+ int n_blocks =
+ dev->internal_end_block - dev->internal_start_block + 1;
+ if (aggressive) {
+ threshold = dev->param.chunks_per_block;
+ iterations = n_blocks;
+ } else {
+ int max_threshold;
+
+ if (background)
+ max_threshold = dev->param.chunks_per_block / 2;
+ else
+ max_threshold = dev->param.chunks_per_block / 8;
+
+ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+ if (threshold > max_threshold)
+ threshold = max_threshold;
+
+ iterations = n_blocks / 16 + 1;
+ if (iterations > 100)
+ iterations = 100;
+ }
+
+ for (i = 0;
+ i < iterations &&
+ (dev->gc_dirtiest < 1 ||
+ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
+ i++) {
+ dev->gc_block_finder++;
+ if (dev->gc_block_finder < dev->internal_start_block ||
+ dev->gc_block_finder > dev->internal_end_block)
+ dev->gc_block_finder =
+ dev->internal_start_block;
+
+ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+ pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ pages_used < dev->param.chunks_per_block &&
+ (dev->gc_dirtiest < 1 ||
+ pages_used < dev->gc_pages_in_use) &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ dev->gc_dirtiest = dev->gc_block_finder;
+ dev->gc_pages_in_use = pages_used;
+ }
+ }
+
+ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+ selected = dev->gc_dirtiest;
+ }
+
+ /*
+ * If nothing has been selected for a while, try the oldest dirty
+ * because that's gumming up the works.
+ */
+
+ if (!selected && dev->param.is_yaffs2 &&
+ dev->gc_not_done >= (background ? 10 : 20)) {
+ yaffs2_find_oldest_dirty_seq(dev);
+ if (dev->oldest_dirty_block > 0) {
+ selected = dev->oldest_dirty_block;
+ dev->gc_dirtiest = selected;
+ dev->oldest_dirty_gc_count++;
+ bi = yaffs_get_block_info(dev, selected);
+ dev->gc_pages_in_use =
+ bi->pages_in_use - bi->soft_del_pages;
+ } else {
+ dev->gc_not_done = 0;
+ }
+ }
+
+ if (selected) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC Selected block %d with %d free, prioritised:%d",
+ selected,
+ dev->param.chunks_per_block - dev->gc_pages_in_use,
+ prioritised);
+
+ dev->n_gc_blocks++;
+ if (background)
+ dev->bg_gcs++;
+
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ dev->gc_not_done = 0;
+ if (dev->refresh_skip > 0)
+ dev->refresh_skip--;
+ } else {
+ dev->gc_not_done++;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+ dev->gc_block_finder, dev->gc_not_done, threshold,
+ dev->gc_dirtiest, dev->gc_pages_in_use,
+ dev->oldest_dirty_block, background ? " bg" : "");
+ }
+
+ return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection,
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (the whole array) and will accept less-dirty
+ * blocks.
+ * Passive gc only inspects smaller areas and only accepts dirtier blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+ int aggressive = 0;
+ int gc_ok = YAFFS_OK;
+ int max_tries = 0;
+ int min_erased;
+ int erased_chunks;
+ int checkpt_block_adjust;
+
+ if (dev->param.gc_control_fn &&
+ (dev->param.gc_control_fn(dev) & 1) == 0)
+ return YAFFS_OK;
+
+ if (dev->gc_disable)
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+
+ /* This loop should pass the first time.
+ * Only loops here if the collection does not increase space.
+ */
+
+ do {
+ max_tries++;
+
+ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+ min_erased =
+ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+ erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ /* If we need a block soon then do aggressive gc. */
+ if (dev->n_erased_blocks < min_erased)
+ aggressive = 1;
+ else {
+ if (!background
+ && erased_chunks > (dev->n_free_chunks / 4))
+ break;
+
+ if (dev->gc_skip > 20)
+ dev->gc_skip = 20;
+ if (erased_chunks < dev->n_free_chunks / 2 ||
+ dev->gc_skip < 1 || background)
+ aggressive = 0;
+ else {
+ dev->gc_skip--;
+ break;
+ }
+ }
+
+ dev->gc_skip = 5;
+
+ /* If we don't already have a block being gc'd then see if we
+ * should start another */
+
+ if (dev->gc_block < 1 && !aggressive) {
+ dev->gc_block = yaffs2_find_refresh_block(dev);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+ if (dev->gc_block < 1) {
+ dev->gc_block =
+ yaffs_find_gc_block(dev, aggressive, background);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ if (dev->gc_block > 0) {
+ dev->all_gcs++;
+ if (!aggressive)
+ dev->passive_gc_count++;
+
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC n_erased_blocks %d aggressive %d",
+ dev->n_erased_blocks, aggressive);
+
+ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
+ dev->gc_block > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+ dev->n_erased_blocks, max_tries,
+ dev->gc_block);
+ }
+ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ (dev->gc_block > 0) && (max_tries < 2));
+
+ return aggressive ? gc_ok : YAFFS_OK;
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+ yaffs_check_gc(dev, 1);
+ return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+ if (nand_chunk >= 0)
+ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+ buffer, NULL);
+ else {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not found zero instead",
+ nand_chunk);
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+ return 0;
+ }
+
+}
+
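+/*
+ * yaffs_chunk_del()
+ * Delete a chunk: update the block's bookkeeping and, for yaffs1 with
+ * mark_flash set, write deletion-marker tags to NAND.
+ * If this was the last page in use in the block (and no shrink header is
+ * pending) the block becomes dirty and is queued for erasure.
+ */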
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn)
+{
+ int block;
+ int page;
+ struct yaffs_ext_tags tags;
+ struct yaffs_block_info *bi;
+
+ if (chunk_id <= 0)
+ return;
+
+ dev->n_deletions++;
+ block = chunk_id / dev->param.chunks_per_block;
+ page = chunk_id % dev->param.chunks_per_block;
+
+ if (!yaffs_check_chunk_bit(dev, block, page))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Deleting invalid chunk %d", chunk_id);
+
+ bi = yaffs_get_block_info(dev, block);
+
+ yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+ yaffs_trace(YAFFS_TRACE_DELETION,
+ "line %d delete of chunk %d",
+ lyn, chunk_id);
+
+ if (!dev->param.is_yaffs2 && mark_flash &&
+ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+ memset(&tags, 0, sizeof(tags));
+ tags.is_deleted = 1;
+ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+ yaffs_handle_chunk_update(dev, chunk_id, &tags);
+ } else {
+ dev->n_unmarked_deletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->n_free_chunks++;
+ yaffs_clear_chunk_bit(dev, block, page);
+ bi->pages_in_use--;
+
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ yaffs_block_became_dirty(dev, block);
+ }
+ }
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve)
+{
+	/* Find the old chunk. We need to do this to get the serial number.
+	 * Write the new one and patch it into the tree.
+	 * Invalidate the old tags.
+	 */
+
+ int prev_chunk_id;
+ struct yaffs_ext_tags prev_tags;
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_dev *dev = in->my_dev;
+
+ yaffs_check_gc(dev, 0);
+
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
+ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+ if (prev_chunk_id < 1 &&
+ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+ return 0;
+
+ /* Set up new tags */
+ memset(&new_tags, 0, sizeof(new_tags));
+
+ new_tags.chunk_id = inode_chunk;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number =
+ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+ new_tags.n_bytes = n_bytes;
+
+ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Writing %d bytes to chunk!!!!!!!!!",
+ n_bytes);
+ BUG();
+ }
+
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+ if (new_chunk_id > 0) {
+ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ yaffs_verify_file_sane(in);
+ }
+ return new_chunk_id;
+
+}
+
+
+
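+/*
+ * Extended attribute (xattrib) handling.
+ * Attributes are stored as name/value pairs in the spare space of the
+ * object header chunk, after struct yaffs_obj_hdr. Modifications go
+ * through yaffs_update_oh() so that a new header chunk gets written.
+ */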
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+ const YCHAR *name, const void *value, int size,
+ int flags)
+{
+ struct yaffs_xattr_mod xmod;
+ int result;
+
+ xmod.set = set;
+ xmod.name = name;
+ xmod.data = value;
+ xmod.size = size;
+ xmod.flags = flags;
+ xmod.result = -ENOSPC;
+
+ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+ if (result > 0)
+ return xmod.result;
+ else
+ return -ENOSPC;
+}
+
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+ struct yaffs_xattr_mod *xmod)
+{
+ int retval = 0;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+ char *x_buffer = buffer + x_offs;
+
+ if (xmod->set)
+ retval =
+ nval_set(x_buffer, x_size, xmod->name, xmod->data,
+ xmod->size, xmod->flags);
+ else
+ retval = nval_del(x_buffer, x_size, xmod->name);
+
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ xmod->result = retval;
+
+ return retval;
+}
+
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
+ void *value, int size)
+{
+ char *buffer = NULL;
+ int result;
+ struct yaffs_ext_tags tags;
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+ char *x_buffer;
+ int retval = 0;
+
+ if (obj->hdr_chunk < 1)
+ return -ENODATA;
+
+ /* If we know that the object has no xattribs then don't do all the
+ * reading and parsing.
+ */
+ if (obj->xattr_known && !obj->has_xattr) {
+ if (name)
+ return -ENODATA;
+ else
+ return 0;
+ }
+
+ buffer = (char *)yaffs_get_temp_buffer(dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+ if (result != YAFFS_OK)
+ retval = -ENOENT;
+ else {
+ x_buffer = buffer + x_offs;
+
+ if (!obj->xattr_known) {
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ }
+
+ if (name)
+ retval = nval_get(x_buffer, x_size, name, value, size);
+ else
+ retval = nval_list(x_buffer, x_size, value, size);
+ }
+ yaffs_release_temp_buffer(dev, (u8 *) buffer);
+ return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags)
+{
+ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size)
+{
+ return yaffs_do_xattrib_fetch(obj, name, value, size);
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
+}
+
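+/*
+ * yaffs_check_obj_details_loaded()
+ * Objects found by a lazy scan only carry minimal information.
+ * This reads the object header chunk to fill in the mode, attributes, name
+ * and (for symlinks) the alias, then clears the lazy_loaded flag.
+ */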
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+ u8 *buf;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ int result;
+ int alloc_failed = 0;
+
+ if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
+ return;
+
+ dev = in->my_dev;
+ in->lazy_loaded = 0;
+ buf = yaffs_get_temp_buffer(dev);
+
+ result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
+ oh = (struct yaffs_obj_hdr *)buf;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ yaffs_set_obj_name_from_oh(in, oh);
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.symlink_variant.alias)
+ alloc_failed = 1; /* Not returned */
+ }
+ yaffs_release_temp_buffer(dev, buf);
+}
+
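+/*
+ * yaffs_load_name_from_oh()
+ * Copy a name out of an object header into a YCHAR buffer, handling the
+ * auto-unicode scheme (when enabled) where an ASCII name starts at
+ * oh_name[0] and a Unicode name starts at oh_name[1] (with oh_name[0] == 0).
+ */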
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
+ const YCHAR *oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ if (dev->param.auto_unicode) {
+ if (*oh_name) {
+ /* It is an ASCII name, do an ASCII to
+ * unicode conversion */
+ const char *ascii_oh_name = (const char *)oh_name;
+ int n = buff_size - 1;
+ while (n > 0 && *ascii_oh_name) {
+ *name = *ascii_oh_name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ strncpy(name, oh_name + 1, buff_size - 1);
+ }
+ } else {
+#else
+ (void) dev;
+ {
+#endif
+ strncpy(name, oh_name, buff_size - 1);
+ }
+}
+
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
+ const YCHAR *name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+ int is_ascii;
+ YCHAR *w;
+
+ if (dev->param.auto_unicode) {
+
+ is_ascii = 1;
+ w = name;
+
+ /* Figure out if the name will fit in ascii character set */
+ while (is_ascii && *w) {
+ if ((*w) & 0xff00)
+ is_ascii = 0;
+ w++;
+ }
+
+ if (is_ascii) {
+ /* It is an ASCII name, so convert unicode to ascii */
+ char *ascii_oh_name = (char *)oh_name;
+ int n = YAFFS_MAX_NAME_LENGTH - 1;
+ while (n > 0 && *name) {
+ *ascii_oh_name = *name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* Unicode name, so save starting at the second YCHAR */
+ *oh_name = 0;
+ strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
+ }
+ } else {
+#else
+	(void) dev;
+ {
+#endif
+ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+ }
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
+ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+ struct yaffs_block_info *bi;
+ struct yaffs_dev *dev = in->my_dev;
+ int prev_chunk_id;
+ int ret_val = 0;
+ int result = 0;
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_ext_tags old_tags;
+ const YCHAR *alias = NULL;
+ u8 *buffer = NULL;
+ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+ struct yaffs_obj_hdr *oh = NULL;
+ loff_t file_size = 0;
+
+ strcpy(old_name, _Y("silly old name"));
+
+ if (in->fake && in != dev->root_dir && !force && !xmod)
+ return ret_val;
+
+ yaffs_check_gc(dev, 0);
+ yaffs_check_obj_details_loaded(in);
+
+ buffer = yaffs_get_temp_buffer(in->my_dev);
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ prev_chunk_id = in->hdr_chunk;
+
+ if (prev_chunk_id > 0) {
+ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+ buffer, &old_tags);
+
+ yaffs_verify_oh(in, oh, &old_tags, 0);
+ memcpy(old_name, oh->name, sizeof(oh->name));
+ memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
+ } else {
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ }
+
+ oh->type = in->variant_type;
+ oh->yst_mode = in->yst_mode;
+ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+ yaffs_load_attribs_oh(oh, in);
+
+ if (in->parent)
+ oh->parent_obj_id = in->parent->obj_id;
+ else
+ oh->parent_obj_id = 0;
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_load_oh_from_name(dev, oh->name, name);
+ } else if (prev_chunk_id > 0) {
+ memcpy(oh->name, old_name, sizeof(oh->name));
+ } else {
+ memset(oh->name, 0, sizeof(oh->name));
+ }
+
+ oh->is_shrink = is_shrink;
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Should not happen */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
+ oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
+ file_size = in->variant.file_variant.file_size;
+ yaffs_oh_size_load(oh, file_size);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = in->variant.symlink_variant.alias;
+ if (!alias)
+ alias = _Y("no alias");
+ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+ break;
+ }
+
+ /* process any xattrib modifications */
+ if (xmod)
+ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+ /* Tags */
+ memset(&new_tags, 0, sizeof(new_tags));
+ in->serial++;
+ new_tags.chunk_id = 0;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number = in->serial;
+
+ /* Add extra info for file header */
+ new_tags.extra_available = 1;
+ new_tags.extra_parent_id = oh->parent_obj_id;
+ new_tags.extra_file_size = file_size;
+ new_tags.extra_is_shrink = oh->is_shrink;
+ new_tags.extra_equiv_id = oh->equiv_id;
+ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+ new_tags.extra_obj_type = in->variant_type;
+ yaffs_verify_oh(in, oh, &new_tags, 1);
+
+ /* Create new chunk in NAND */
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags,
+ (prev_chunk_id > 0) ? 1 : 0);
+
+ if (buffer)
+ yaffs_release_temp_buffer(dev, buffer);
+
+ if (new_chunk_id < 0)
+ return new_chunk_id;
+
+ in->hdr_chunk = new_chunk_id;
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ if (!yaffs_obj_cache_dirty(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block
+ * that the chunk lives on */
+ if (is_shrink) {
+ bi = yaffs_get_block_info(in->my_dev,
+ new_chunk_id /
+ in->my_dev->param.chunks_per_block);
+ bi->has_shrink_hdr = 1;
+ }
+
+
+ return new_chunk_id;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it:
+ *  - An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ *  - Some complete chunks
+ *  - An incomplete chunk to end off with
+ *
+ * Curve-ball: the first chunk might also be the last chunk.
+ */
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ struct yaffs_cache *cache;
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+ if ((start + n) < dev->data_bytes_per_chunk)
+ n_copy = n;
+ else
+ n_copy = dev->data_bytes_per_chunk - start;
+
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+		/* If the chunk is already in the cache, or it is less than
+		 * a whole chunk, or we're using inband tags, then use the
+		 * cache (if there is caching); else bypass the cache.
+		 */
+ if (cache || n_copy != dev->data_bytes_per_chunk ||
+ dev->param.inband_tags) {
+ if (dev->param.n_caches > 0) {
+
+ /* If we can't find the data in the cache,
+ * then load it up. */
+
+ if (!cache) {
+ cache =
+ yaffs_grab_chunk_cache(in->my_dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ cache->n_bytes = 0;
+ }
+
+ yaffs_use_cache(dev, cache, 0);
+
+ cache->locked = 1;
+
+ memcpy(buffer, &cache->data[start], n_copy);
+
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy.. */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev);
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(buffer, &local_buffer[start], n_copy);
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+ }
+ } else {
+ /* A full chunk. Read directly into the buffer. */
+ yaffs_rd_data_obj(in, chunk, buffer);
+ }
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+ return n_done;
+}
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_through)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ int n_writeback;
+ loff_t start_write = offset;
+ int chunk_written = 0;
+ u32 n_bytes_read;
+ loff_t chunk_start;
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0 && chunk_written >= 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+ if (((loff_t)chunk) *
+ dev->data_bytes_per_chunk + start != offset ||
+ start >= dev->data_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "AddrToChunk of offset %lld gives chunk %d start %d",
+ offset, chunk, start);
+ }
+ chunk++; /* File pos to chunk in file offset */
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+
+ if ((start + n) < dev->data_bytes_per_chunk) {
+ n_copy = n;
+
+			/* Now calculate how many bytes to write back...
+			 * If we're overwriting and not writing to the end of
+			 * the file then we need to write back as much as was
+			 * there before.
+			 */
+
+ chunk_start = (((loff_t)(chunk - 1)) *
+ dev->data_bytes_per_chunk);
+
+ if (chunk_start > in->variant.file_variant.file_size)
+ n_bytes_read = 0; /* Past end of file */
+ else
+ n_bytes_read =
+ in->variant.file_variant.file_size -
+ chunk_start;
+
+ if (n_bytes_read > dev->data_bytes_per_chunk)
+ n_bytes_read = dev->data_bytes_per_chunk;
+
+ n_writeback =
+ (n_bytes_read >
+ (start + n)) ? n_bytes_read : (start + n);
+
+ if (n_writeback < 0 ||
+ n_writeback > dev->data_bytes_per_chunk)
+ BUG();
+
+ } else {
+ n_copy = dev->data_bytes_per_chunk - start;
+ n_writeback = dev->data_bytes_per_chunk;
+ }
+
+ if (n_copy != dev->data_bytes_per_chunk ||
+ !dev->param.cache_bypass_aligned ||
+ dev->param.inband_tags) {
+ /* An incomplete start or end chunk (or maybe both
+ * start and end chunk), or we're using inband tags,
+ * or we're forcing writes through the cache,
+ * so we want to use the cache buffers.
+ */
+ if (dev->param.n_caches > 0) {
+ struct yaffs_cache *cache;
+
+ /* If we can't find the data in the cache, then
+ * load the cache */
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ if (!cache &&
+ yaffs_check_alloc_available(dev, 1)) {
+ cache = yaffs_grab_chunk_cache(dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ } else if (cache &&
+ !cache->dirty &&
+ !yaffs_check_alloc_available(dev,
+ 1)) {
+ /* Drop the cache if it was a read cache
+ * item and no space check has been made
+ * for it.
+ */
+ cache = NULL;
+ }
+
+ if (cache) {
+ yaffs_use_cache(dev, cache, 1);
+ cache->locked = 1;
+
+ memcpy(&cache->data[start], buffer,
+ n_copy);
+
+ cache->locked = 0;
+ cache->n_bytes = n_writeback;
+
+ if (write_through) {
+ chunk_written =
+ yaffs_wr_data_obj
+ (cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ }
+ } else {
+ chunk_written = -1; /* fail write */
+ }
+ } else {
+ /* An incomplete start or end chunk (or maybe
+ * both start and end chunk). Read into the
+ * local buffer then copy over and write back.
+ */
+
+ u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+ memcpy(&local_buffer[start], buffer, n_copy);
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk,
+ local_buffer,
+ n_writeback, 0);
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+ }
+ } else {
+ /* A full chunk. Write directly from the buffer. */
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk, buffer,
+ dev->data_bytes_per_chunk, 0);
+
+ /* Since we've overwritten the cached data,
+ * we better invalidate it. */
+ yaffs_invalidate_chunk_cache(in, chunk);
+ }
+
+ if (chunk_written >= 0) {
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+ }
+
+ /* Update file object */
+
+ if ((start_write + n_done) > in->variant.file_variant.file_size)
+ in->variant.file_variant.file_size = (start_write + n_done);
+
+ in->dirty = 1;
+ return n_done;
+}
+
+int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_through)
+{
+ yaffs2_handle_hole(in, offset);
+ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
+{
+
+ struct yaffs_dev *dev = in->my_dev;
+ loff_t old_size = in->variant.file_variant.file_size;
+ int i;
+ int chunk_id;
+ u32 dummy;
+ int last_del;
+ int start_del;
+
+ if (old_size > 0)
+ yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
+ else
+ last_del = 0;
+
+ yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
+ &start_del, &dummy);
+ last_del++;
+ start_del++;
+
+ /* Delete backwards so that we don't end up with holes if
+ * power is lost part-way through the operation.
+ */
+ for (i = last_del; i >= start_del; i--) {
+ /* NB this could be optimised somewhat,
+ * eg. could retrieve the tags and write them without
+ * using yaffs_chunk_del
+ */
+
+ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+
+ if (chunk_id < 1)
+ continue;
+
+ if (chunk_id <
+ (dev->internal_start_block * dev->param.chunks_per_block) ||
+ chunk_id >=
+ ((dev->internal_end_block + 1) *
+ dev->param.chunks_per_block)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Found daft chunk_id %d for %d",
+ chunk_id, i);
+ } else {
+ in->n_data_chunks--;
+ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+ }
+ }
+}
+
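+/*
+ * yaffs_resize_file_down()
+ * Shrink a file: prune the data chunks beyond new_size, rewrite the new
+ * last chunk zero-padded if the new size is not chunk aligned, then prune
+ * the tnode tree.
+ */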
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+ int new_full;
+ u32 new_partial;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+ yaffs_prune_chunks(obj, new_size);
+
+ if (new_partial != 0) {
+ int last_chunk = 1 + new_full;
+ u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+ /* Rewrite the last chunk with its new size and zero pad */
+ yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+ memset(local_buffer + new_partial, 0,
+ dev->data_bytes_per_chunk - new_partial);
+
+ yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+ new_partial, 1);
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+ }
+
+ obj->variant.file_variant.file_size = new_size;
+
+ yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+ struct yaffs_dev *dev = in->my_dev;
+ loff_t old_size = in->variant.file_variant.file_size;
+
+ yaffs_flush_file_cache(in);
+ yaffs_invalidate_whole_cache(in);
+
+ yaffs_check_gc(dev, 0);
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ if (new_size == old_size)
+ return YAFFS_OK;
+
+ if (new_size > old_size) {
+ yaffs2_handle_hole(in, new_size);
+ in->variant.file_variant.file_size = new_size;
+ } else {
+ /* new_size < old_size */
+ yaffs_resize_file_down(in, new_size);
+ }
+
+	/* Write a new object header to reflect the resize,
+	 * and show we've shrunk the file if need be.
+	 * Do this only if the file is not in the deleted directories
+	 * and is not shadowed.
+	 */
+ if (in->parent &&
+ !in->is_shadowed &&
+ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+ return YAFFS_OK;
+}
+
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+ if (!in->dirty)
+ return YAFFS_OK;
+
+ yaffs_flush_file_cache(in);
+
+ if (data_sync)
+ return YAFFS_OK;
+
+ if (update_time)
+ yaffs_load_current_time(in, 0, 0);
+
+ return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
+ YAFFS_OK : YAFFS_FAIL;
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+ int ret_val;
+ int del_now = 0;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!in->my_inode)
+ del_now = 1;
+
+ if (del_now) {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->del_dir,
+ _Y("deleted"), 0, 0);
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: immediate deletion of file %d",
+ in->obj_id);
+ in->deleted = 1;
+ in->my_dev->n_deleted_files++;
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+ yaffs_soft_del_file(in);
+ } else {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+ return ret_val;
+}
+
+static int yaffs_del_file(struct yaffs_obj *in)
+{
+ int ret_val = YAFFS_OK;
+	int deleted; /* Cache the value on the stack in case "in" is freed */
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+
+ if (in->n_data_chunks > 0) {
+ /* Use soft deletion if there is data in the file.
+ * That won't be the case if it has been resized to zero.
+ */
+ if (!in->unlinked)
+ ret_val = yaffs_unlink_file_if_needed(in);
+
+ deleted = in->deleted;
+
+ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
+ deleted = 1;
+ in->my_dev->n_deleted_files++;
+ yaffs_soft_del_file(in);
+ }
+ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+ in->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(in);
+
+ return YAFFS_OK;
+ }
+}
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+ return (obj &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+ !(list_empty(&obj->variant.dir_variant.children));
+}
+
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+ /* First check that the directory is empty. */
+ if (yaffs_is_non_empty_dir(obj))
+ return YAFFS_FAIL;
+
+ return yaffs_generic_obj_del(obj);
+}
+
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+ kfree(in->variant.symlink_variant.alias);
+ in->variant.symlink_variant.alias = NULL;
+
+ return yaffs_generic_obj_del(in);
+}
+
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+ /* remove this hardlink from the list associated with the equivalent
+ * object
+ */
+ list_del_init(&in->hard_links);
+ return yaffs_generic_obj_del(in);
+}
+
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+ int ret_val = -1;
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ ret_val = yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!list_empty(&obj->variant.dir_variant.dirty)) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Remove object %d from dirty directories",
+ obj->obj_id);
+ list_del_init(&obj->variant.dir_variant.dirty);
+ }
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ ret_val = yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ ret_val = yaffs_del_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ ret_val = yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ ret_val = 0;
+ break; /* should not happen. */
+ }
+ return ret_val;
+}
+
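+/*
+ * yaffs_unlink_worker()
+ * Unlink an object. Hardlinks get special treatment (see the comment in
+ * the body). Otherwise the object is deleted immediately if no inode is
+ * holding it open, or moved to the unlinked directory to be deleted later.
+ * Unlinking a non-empty directory fails.
+ */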
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+ int del_now = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (!obj->my_inode)
+ del_now = 1;
+
+ yaffs_update_parent(obj->parent);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_del_link(obj);
+ } else if (!list_empty(&obj->hard_links)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+		 * the Linux link/inode model.
+ *
+ * We can't really delete the object.
+ * Instead, we do the following:
+ * - Select a hardlink.
+ * - Unhook it from the hard links
+ * - Move it from its parent directory so that the rename works.
+ * - Rename the object to the hardlink's name.
+ * - Delete the hardlink
+ */
+
+ struct yaffs_obj *hl;
+ struct yaffs_obj *parent;
+ int ret_val;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+ hard_links);
+
+ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ parent = hl->parent;
+
+ list_del_init(&hl->hard_links);
+
+ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+ if (ret_val == YAFFS_OK)
+ ret_val = yaffs_generic_obj_del(hl);
+
+ return ret_val;
+
+ } else if (del_now) {
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ list_del_init(&obj->variant.dir_variant.dirty);
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ return yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ return YAFFS_FAIL;
+ }
+ } else if (yaffs_is_non_empty_dir(obj)) {
+ return YAFFS_FAIL;
+ } else {
+ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+}
+
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->unlink_allowed)
+ return yaffs_unlink_worker(obj);
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
+{
+ struct yaffs_obj *obj;
+
+ obj = yaffs_find_by_name(dir, name);
+ return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
+ struct yaffs_obj *new_dir, const YCHAR *new_name)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *existing_target = NULL;
+ int force = 0;
+ int result;
+ struct yaffs_dev *dev;
+
+ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+	/* Special case for case insensitive systems.
+	 * While look-up is case insensitive, the name isn't.
+	 * Therefore we might want to change x.txt to X.txt.
+	 */
+ if (old_dir == new_dir &&
+ old_name && new_name &&
+ strcmp(old_name, new_name) == 0)
+ force = 1;
+#endif
+
+ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+
+ if (old_name)
+ obj = yaffs_find_by_name(old_dir, old_name);
+ else{
+ obj = old_dir;
+ old_dir = obj->parent;
+ }
+
+ if (obj && obj->rename_allowed) {
+ /* Now handle an existing target, if there is one */
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+ if (yaffs_is_non_empty_dir(existing_target)) {
+ return YAFFS_FAIL; /* ENOTEMPTY */
+ } else if (existing_target && existing_target != obj) {
+ /* Nuke the target first, using shadowing,
+ * but only if it isn't the same object.
+ *
+ * Note we must disable gc here otherwise it can mess
+ * up the shadowing.
+ *
+ */
+ dev->gc_disable = 1;
+ yaffs_change_obj_name(obj, new_dir, new_name, force,
+ existing_target->obj_id);
+ existing_target->is_shadowed = 1;
+ yaffs_unlink_obj(existing_target);
+ dev->gc_disable = 0;
+ }
+
+ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+ yaffs_update_parent(old_dir);
+ if (new_dir != old_dir)
+ yaffs_update_parent(new_dir);
+
+ return result;
+ }
+ return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
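+/*
+ * yaffs_handle_shadowed_obj()
+ * Called during scanning when a header shadows another object id.
+ * For backward (yaffs2) scanning an already-known object is left alone;
+ * otherwise a placeholder file object is created in the unlinked directory
+ * so that it is cleaned up after the scan.
+ */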
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning)
+{
+ struct yaffs_obj *obj;
+
+ if (backward_scanning) {
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+ obj = yaffs_find_by_number(dev, obj_id);
+ if (obj)
+ return;
+ }
+
+	/* Let's create it (if it does not exist), assuming it is a file so
+	 * that it can do shrinking etc.
+	 * We put it in the unlinked dir to be cleaned up after the scanning.
+	 */
+ obj =
+ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+ if (!obj)
+ return;
+ obj->is_shadowed = 1;
+ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+ obj->variant.file_variant.shrink_size = 0;
+ obj->valid = 1; /* So that we don't read any other info. */
+}
+
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
+{
+ struct list_head *lh;
+ struct list_head *save;
+ struct yaffs_obj *hl;
+ struct yaffs_obj *in;
+
+ list_for_each_safe(lh, save, hard_list) {
+ hl = list_entry(lh, struct yaffs_obj, hard_links);
+ in = yaffs_find_by_number(dev,
+ hl->variant.hardlink_variant.equiv_id);
+
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardlink_variant.equiv_obj = in;
+ list_add(&hl->hard_links, &in->hard_links);
+ } else {
+			/* TODO: Need to report/handle this better.
+			 * Got a problem... hardlink to a non-existent object.
+			 */
+ hl->variant.hardlink_variant.equiv_obj = NULL;
+ INIT_LIST_HEAD(&hl->hard_links);
+ }
+ }
+}
+
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+ /*
+ * Sort out state of unlinked and deleted objects after scanning.
+ */
+ struct list_head *i;
+ struct list_head *n;
+ struct yaffs_obj *l;
+
+ if (dev->read_only)
+ return;
+
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinked_dir->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+
+ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under deleted, unlinked
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ * This code assumes that we don't ever change the current relationships
+ * between directories:
+ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ * lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been
+ * deleted leaving the object "hanging" without being rooted in the
+ * directory tree.
+ */
+
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+ return (obj == dev->del_dir ||
+ obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *parent;
+ int i;
+ struct list_head *lh;
+ struct list_head *n;
+ int depth_limit;
+ int hanging;
+
+ if (dev->read_only)
+ return;
+
+ /* Iterate through the objects in each hash entry,
+ * looking at each object.
+ * Make sure it is rooted.
+ */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ parent = obj->parent;
+
+ if (yaffs_has_null_parent(dev, obj)) {
+ /* These directories are not hanging */
+ hanging = 0;
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ hanging = 1;
+ } else if (yaffs_has_null_parent(dev, parent)) {
+ hanging = 0;
+ } else {
+ /*
+ * Need to follow the parent chain to
+ * see if it is hanging.
+ */
+ hanging = 0;
+ depth_limit = 100;
+
+ while (parent != dev->root_dir &&
+ parent->parent &&
+ parent->parent->variant_type ==
+ YAFFS_OBJECT_TYPE_DIRECTORY &&
+ depth_limit > 0) {
+ parent = parent->parent;
+ depth_limit--;
+ }
+ if (parent != dev->root_dir)
+ hanging = 1;
+ }
+ if (hanging) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Hanging object %d moved to lost and found",
+ obj->obj_id);
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+ }
+ }
+ }
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+ struct yaffs_obj *obj;
+ struct list_head *lh;
+ struct list_head *n;
+
+ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ BUG();
+
+ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffs_del_dir_contents(obj);
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Deleting lost_found object %d",
+ obj->obj_id);
+ yaffs_unlink_obj(obj);
+ }
+}
+
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+ yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+ const YCHAR *name)
+{
+ int sum;
+ struct list_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+ struct yaffs_obj *l;
+
+ if (!name)
+ return NULL;
+
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: null pointer directory"
+ );
+ BUG();
+ return NULL;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: non-directory"
+ );
+ BUG();
+ }
+
+ sum = yaffs_calc_name_sum(name);
+
+ list_for_each(i, &directory->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+
+ if (l->parent != directory)
+ BUG();
+
+ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+ return l;
+ } else if (l->sum == sum || l->hdr_chunk <= 0) {
+			/* Name sum matches, or the object has no header chunk
+			 * (lost-n-found objects are named objNNN), so do a
+			 * real name comparison.
+			 */
+ yaffs_get_obj_name(l, buffer,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
+ return l;
+ }
+ }
+ return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj = obj->variant.hardlink_variant.equiv_obj;
+ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+}
+
+/*
+ * A note or two on object names.
+ * * If the object name is missing, we then make one up in the form objnnn
+ *
+ * * ASCII names are stored in the object header's name field from byte zero
+ * * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic unicode names are stored slightly differently:
+ *  - If the name can fit in the ASCII character space then they are saved
+ *    as ASCII names as per above.
+ *  - If the name needs Unicode then the name is saved in Unicode,
+ *    starting at oh->name[1].
+ */
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+ int buffer_size)
+{
+ /* Create an object name if we could not find one. */
+ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+ YCHAR local_name[20];
+ YCHAR num_string[20];
+ YCHAR *x = &num_string[19];
+ unsigned v = obj->obj_id;
+ num_string[19] = 0;
+ while (v > 0) {
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+ strcat(local_name, x);
+ strncpy(name, local_name, buffer_size - 1);
+ }
+}
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
+{
+ memset(name, 0, buffer_size * sizeof(YCHAR));
+ yaffs_check_obj_details_loaded(obj);
+ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+ } else if (obj->short_name[0]) {
+ strcpy(name, obj->short_name);
+ } else if (obj->hdr_chunk > 0) {
+ int result;
+ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
+
+ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+ if (obj->hdr_chunk > 0) {
+ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+ obj->hdr_chunk,
+ buffer, NULL);
+ }
+ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+ buffer_size);
+
+ yaffs_release_temp_buffer(obj->my_dev, buffer);
+ }
+
+ yaffs_fix_null_name(obj, name, buffer_size);
+
+ return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+ /* Dereference any hard linking */
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.file_variant.file_size;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ if (!obj->variant.symlink_variant.alias)
+ return 0;
+ return strnlen(obj->variant.symlink_variant.alias,
+ YAFFS_MAX_ALIAS_LENGTH);
+ } else {
+ /* Only a directory should drop through to here */
+ return obj->my_dev->data_bytes_per_chunk;
+ }
+}
+
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+ int count = 0;
+ struct list_head *i;
+
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+ list_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+}
+
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ return obj->obj_id;
+}
+
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return DT_DIR;
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return DT_LNK;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ if (S_ISFIFO(obj->yst_mode))
+ return DT_FIFO;
+ if (S_ISCHR(obj->yst_mode))
+ return DT_CHR;
+ if (S_ISBLK(obj->yst_mode))
+ return DT_BLK;
+ if (S_ISSOCK(obj->yst_mode))
+ return DT_SOCK;
+ return DT_REG;
+ break;
+ default:
+ return DT_REG;
+ break;
+ }
+}
+
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+ return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+static int yaffs_check_dev_fns(struct yaffs_dev *dev)
+{
+ struct yaffs_driver *drv = &dev->drv;
+ struct yaffs_tags_handler *tagger = &dev->tagger;
+
+ /* Common functions, gotta have */
+ if (!drv->drv_read_chunk_fn ||
+ !drv->drv_write_chunk_fn ||
+ !drv->drv_erase_fn)
+ return 0;
+
+ if (dev->param.is_yaffs2 &&
+ (!drv->drv_mark_bad_fn || !drv->drv_check_bad_fn))
+ return 0;
+
+ /* Install the default tags marshalling functions if needed. */
+ yaffs_tags_compat_install(dev);
+ yaffs_tags_marshall_install(dev);
+
+ /* Check we now have the marshalling functions required. */
+ if (!tagger->write_chunk_tags_fn ||
+ !tagger->read_chunk_tags_fn ||
+ !tagger->query_block_fn ||
+ !tagger->mark_bad_fn)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+ /* Initialise the unlinked, deleted, root and lost+found directories */
+ dev->lost_n_found = dev->root_dir = NULL;
+ dev->unlinked_dir = dev->del_dir = NULL;
+ dev->unlinked_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+ dev->del_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+ dev->root_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+ dev->lost_n_found =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+ && dev->del_dir) {
+ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+ return YAFFS_FAIL;
+}
+
+/* Low level init.
+ * Typically only used by yaffs_guts_initialise(), but also used by the
+ * low level yaffs driver tests.
+ */
+
+int yaffs_guts_ll_init(struct yaffs_dev *dev)
+{
+
+
+ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_ll_init()");
+
+ if (!dev) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Need a device"
+ );
+ return YAFFS_FAIL;
+ }
+
+ if (dev->ll_init)
+ return YAFFS_OK;
+
+ dev->internal_start_block = dev->param.start_block;
+ dev->internal_end_block = dev->param.end_block;
+ dev->block_offset = 0;
+ dev->chunk_offset = 0;
+ dev->n_free_chunks = 0;
+
+ dev->gc_block = 0;
+
+ if (dev->param.start_block == 0) {
+ dev->internal_start_block = dev->param.start_block + 1;
+ dev->internal_end_block = dev->param.end_block + 1;
+ dev->block_offset = 1;
+ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 1024) ||
+ (!dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 512) ||
+ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
+ dev->param.chunks_per_block < 2 ||
+ dev->param.n_reserved_blocks < 2 ||
+ dev->internal_start_block <= 0 ||
+ dev->internal_end_block <= 0 ||
+ dev->internal_end_block <=
+ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+ ) {
+ /* otherwise it is too small */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+ dev->param.total_bytes_per_chunk,
+ dev->param.is_yaffs2 ? "2" : "",
+ dev->param.inband_tags);
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+ if (dev->param.inband_tags)
+ dev->data_bytes_per_chunk =
+ dev->param.total_bytes_per_chunk -
+ sizeof(struct yaffs_packed_tags2_tags_only);
+ else
+ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+ if (!yaffs_check_dev_fns(dev)) {
+ /* Function missing */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "device function(s) missing or wrong");
+
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+ return YAFFS_FAIL;
+ }
+
+ return YAFFS_OK;
+}
+
+
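+/*
+ * yaffs_format_dev()
+ * Erase every block that is not marked dead (bad). The device must pass
+ * low level init and must not be mounted.
+ */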
+int yaffs_format_dev(struct yaffs_dev *dev)
+{
+ int i;
+ enum yaffs_block_state state;
+ u32 dummy;
+
+ if(yaffs_guts_ll_init(dev) != YAFFS_OK)
+ return YAFFS_FAIL;
+
+ if(dev->is_mounted)
+ return YAFFS_FAIL;
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ yaffs_query_init_block_state(dev, i, &state, &dummy);
+ if (state != YAFFS_BLOCK_STATE_DEAD)
+ yaffs_erase_block(dev, i);
+ }
+
+ return YAFFS_OK;
+}
+
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ if(yaffs_guts_ll_init(dev) != YAFFS_OK)
+ return YAFFS_FAIL;
+
+ if (dev->is_mounted) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+ return YAFFS_FAIL;
+ }
+
+ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ x = dev->data_bytes_per_chunk;
+ /* We always use dev->chunk_shift and dev->chunk_div */
+ dev->chunk_shift = calc_shifts(x);
+ x >>= dev->chunk_shift;
+ dev->chunk_div = x;
+ /* We only use chunk mask if chunk_div is 1 */
+ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
+
+ /*
+ * Calculate chunk_grp_bits.
+	 * We need to find the next power of 2 greater than internal_end_block.
+ */
+
+ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = calc_shifts_ceiling(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+ dev->tnode_width = 16;
+ else
+ dev->tnode_width = bits;
+ } else {
+ dev->tnode_width = 16;
+ }
+
+ dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+	/* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+	 * so if the bitwidth of the chunk range we're using is greater
+	 * than 16 we need to figure out chunk shift and chunk_grp_size.
+	 */
+
+ if (bits <= dev->tnode_width)
+ dev->chunk_grp_bits = 0;
+ else
+ dev->chunk_grp_bits = bits - dev->tnode_width;
+
+ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+ if (dev->tnode_size < sizeof(struct yaffs_tnode))
+ dev->tnode_size = sizeof(struct yaffs_tnode);
+
+ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
+
+ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+ return YAFFS_FAIL;
+ }
+
+ /* Finished verifying the device, continue with initialisation */
+
+ /* More device initialisation */
+ dev->all_gcs = 0;
+ dev->passive_gc_count = 0;
+ dev->oldest_dirty_gc_count = 0;
+ dev->bg_gcs = 0;
+ dev->gc_block_finder = 0;
+ dev->buffered_block = -1;
+ dev->doing_buffered_block_rewrite = 0;
+ dev->n_deleted_files = 0;
+ dev->n_bg_deletions = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_ecc_fixed = 0;
+ dev->n_ecc_unfixed = 0;
+ dev->n_tags_ecc_fixed = 0;
+ dev->n_tags_ecc_unfixed = 0;
+ dev->n_erase_failures = 0;
+ dev->n_erased_blocks = 0;
+ dev->gc_disable = 0;
+ dev->has_pending_prioritised_gc = 1;
+ /* Assume the worst for now, will get fixed on first GC */
+ INIT_LIST_HEAD(&dev->dirty_dirs);
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+ dev->cache = NULL;
+ dev->gc_cleanup_list = NULL;
+
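+	/* Allocate the short-op cache: an array of cache descriptors plus
+	 * one chunk-sized data buffer per cache entry.
+	 */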
+ if (!init_failed && dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+ int cache_bytes =
+ dev->param.n_caches * sizeof(struct yaffs_cache);
+
+ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+ buf = (u8 *) dev->cache;
+
+ if (dev->cache)
+ memset(dev->cache, 0, cache_bytes);
+
+ for (i = 0; i < dev->param.n_caches && buf; i++) {
+ dev->cache[i].object = NULL;
+ dev->cache[i].last_use = 0;
+ dev->cache[i].dirty = 0;
+ dev->cache[i].data = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+ if (!buf)
+ init_failed = 1;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_hits = 0;
+
+ if (!init_failed) {
+ dev->gc_cleanup_list =
+ kmalloc(dev->param.chunks_per_block * sizeof(u32),
+ GFP_NOFS);
+ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+ if (dev->param.is_yaffs2)
+ dev->param.use_header_file_size = 1;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && dev->param.is_yaffs2 &&
+ !dev->param.disable_summary &&
+ !yaffs_summary_init(dev))
+ init_failed = 1;
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->param.is_yaffs2) {
+ if (yaffs2_checkpt_restore(dev)) {
+ yaffs_check_obj_details_loaded(dev->root_dir);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT |
+ YAFFS_TRACE_MOUNT,
+ "yaffs: restored from checkpoint"
+ );
+ } else {
+
+ /* Clean up the mess caused by an aborted
+ * checkpoint load then scan backwards.
+ */
+ yaffs_deinit_blocks(dev);
+
+ yaffs_deinit_tnodes_and_objs(dev);
+
+ dev->n_erased_blocks = 0;
+ dev->n_free_chunks = 0;
+ dev->alloc_block = -1;
+ dev->alloc_page = -1;
+ dev->n_deleted_files = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_bg_deletions = 0;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed
+ && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+ } else if (!yaffs1_scan(dev)) {
+ init_failed = 1;
+ }
+
+ yaffs_strip_deleted_objs(dev);
+ yaffs_fix_hanging_objs(dev);
+ if (dev->param.empty_lost_n_found)
+ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() aborted.");
+
+ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->n_page_reads = 0;
+ dev->n_page_writes = 0;
+ dev->n_erasures = 0;
+ dev->n_gc_copies = 0;
+ dev->n_retried_writes = 0;
+
+ dev->n_retired_blocks = 0;
+
+ yaffs_verify_free_chunks(dev);
+ yaffs_verify_blocks(dev);
+
+ /* Clean up any aborted checkpoint data */
+ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() done.");
+ return YAFFS_OK;
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+ if (dev->is_mounted) {
+ int i;
+
+ yaffs_deinit_blocks(dev);
+ yaffs_deinit_tnodes_and_objs(dev);
+ yaffs_summary_deinit(dev);
+
+ if (dev->param.n_caches > 0 && dev->cache) {
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ kfree(dev->cache[i].data);
+ dev->cache[i].data = NULL;
+ }
+
+ kfree(dev->cache);
+ dev->cache = NULL;
+ }
+
+ kfree(dev->gc_cleanup_list);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ kfree(dev->temp_buffer[i].buffer);
+
+ dev->is_mounted = 0;
+
+ yaffs_deinit_nand(dev);
+ }
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+ int n_free = 0;
+ int b;
+ struct yaffs_block_info *blk;
+
+ blk = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+ switch (blk->block_state) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ n_free +=
+ (dev->param.chunks_per_block - blk->pages_in_use +
+ blk->soft_del_pages);
+ break;
+ default:
+ break;
+ }
+ blk++;
+ }
+ return n_free;
+}
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+ /* This is what we report to the outside world */
+ int n_free;
+ int n_dirty_caches;
+ int blocks_for_checkpt;
+ int i;
+
+ n_free = dev->n_free_chunks;
+ n_free += dev->n_deleted_files;
+
+ /* Now count and subtract the number of dirty chunks in the cache. */
+
+ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].dirty)
+ n_dirty_caches++;
+ }
+
+ n_free -= n_dirty_caches;
+
+ n_free -=
+ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+ /* Now figure checkpoint space and report that... */
+ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
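+
+ /* For example (illustrative numbers only): 1000 free chunks plus 10
+ * deleted files, minus 2 dirty cache pages, minus (5 + 1) reserved
+ * blocks and 2 checkpoint blocks of 64 chunks each, reports
+ * 1000 + 10 - 2 - 384 - 128 = 496 free chunks.
+ */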
+
+ if (n_free < 0)
+ n_free = 0;
+
+ return n_free;
+}
+
+
+
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
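+/*
+ * For example, a 64-bit size of 0x123456789 is stored by
+ * yaffs_oh_size_load() as file_size_low = 0x23456789 and
+ * file_size_high = 0x1, and yaffs_oh_to_size() reassembles it as
+ * (high << 32) | low.
+ */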
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
+{
+ oh->file_size_low = (fsize & 0xFFFFFFFF);
+ oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
+}
+
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
+{
+ loff_t retval;
+
+ if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))
+ retval = (((loff_t) oh->file_size_high) << 32) |
+ (((loff_t) oh->file_size_low) & 0xFFFFFFFF);
+ else
+ retval = (loff_t) oh->file_size_low;
+
+ return retval;
+}
+
+
+void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10])
+{
+ int i;
+ struct yaffs_block_info *bi;
+ int s;
+
+ for(i = 0; i < 10; i++)
+ bs[i] = 0;
+
+ for(i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ bi = yaffs_get_block_info(dev, i);
+ s = bi->block_state;
+ if(s > YAFFS_BLOCK_STATE_DEAD || s < YAFFS_BLOCK_STATE_UNKNOWN)
+ bs[0]++;
+ else
+ bs[s]++;
+ }
+}
diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 000000000000..05785367cb2e
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,1003 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xff
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941ff53
+
+/*
+ * Tnodes form a tree with the tnodes in "levels"
+ * Levels greater than 0 hold 8 slots which point to other tnodes.
+ * Those at level 0 hold 16 slots which point to chunks in NAND.
+ *
+ * A maximum level of 8 thus supports files of up to:
+ *
+ * 2^(3*MAX_LEVEL+4) chunks
+ *
+ * That is, a max level of 8 supports files with up to 2^28 chunks, which
+ * gives a maximum file size of around 512 Gbytes with 2k chunks.
+ */
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 8
+#define YAFFS_TNODES_MAX_BITS (YAFFS_TNODES_LEVEL0_BITS + \
+ YAFFS_TNODES_INTERNAL_BITS * \
+ YAFFS_TNODES_MAX_LEVEL)
+#define YAFFS_MAX_CHUNK_ID ((1 << YAFFS_TNODES_MAX_BITS) - 1)
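+
+/* With the values above, YAFFS_TNODES_MAX_BITS = 4 + 3 * 8 = 28, so a file
+ * can span up to 2^28 chunks; with 2k chunks that is 2^28 * 2048 bytes =
+ * 512 Gbytes, matching the figure quoted above.
+ */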
+
+#define YAFFS_MAX_FILE_SIZE_32 0x7fffffff
+
+/* Constants for YAFFS1 mode */
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+#define YAFFS_OBJECT_SPACE 0x40000
+#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1)
+
+/* Binary data version stamps */
+#define YAFFS_SUMMARY_VERSION 1
+#define YAFFS_CHECKPOINT_VERSION 7
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Fake object Id for summary data */
+#define YAFFS_OBJECTID_SUMMARY 0x10
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us, perhaps in the future, to use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks/second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00
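+
+/* As a rough check on the figure above: 8 blocks/second for 15 years is
+ * about 8 * 60 * 60 * 24 * 365 * 15 = 3.8 billion sequence numbers, which
+ * still fits below 0xefffff00 (about 4.0 billion).
+ */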
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+ struct yaffs_obj *object;
+ int chunk_id;
+ int last_use;
+ int dirty;
+ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+ u8 *data;
+};
+
+/* yaffs1 tags structures in RAM
+ * NB This uses bitfields. Bitfields should not straddle a u32 boundary,
+ * otherwise the structure size will get blown out.
+ */
+
+struct yaffs_tags {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes_lsb:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+ struct yaffs_tags as_tags;
+ u8 as_bytes[8];
+};
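+
+/* The bitfields above pack into two 32-bit words without straddling:
+ * word 0: chunk_id (20) + serial_number (2) + n_bytes_lsb (10) = 32 bits,
+ * word 1: obj_id (18) + ecc (12) + n_bytes_msb (2) = 32 bits,
+ * which is why as_bytes[] is 8 bytes.
+ */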
+
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+ unsigned chunk_used; /* Status of the chunk: used or unused */
+ unsigned obj_id; /* If 0 this is not used */
+ unsigned chunk_id; /* If 0 this is a header, else a data chunk */
+ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ enum yaffs_ecc_result ecc_result;
+ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+ unsigned is_deleted; /* The chunk is marked deleted */
+ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extra_available; /* Extra info available if not zero */
+ unsigned extra_parent_id; /* The parent object */
+ unsigned extra_is_shrink; /* Is it a shrink header? */
+ unsigned extra_shadows; /* Does this shadow another object? */
+
+ enum yaffs_obj_type extra_obj_type; /* What object type? */
+
+ loff_t extra_file_size; /* Length if it is a file */
+ unsigned extra_equiv_id; /* Equivalent object for a hard link */
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+ u8 tb0;
+ u8 tb1;
+ u8 tb2;
+ u8 tb3;
+ u8 page_status; /* set to 0 to delete the chunk */
+ u8 block_status;
+ u8 tb4;
+ u8 tb5;
+ u8 ecc1[3];
+ u8 tb6;
+ u8 tb7;
+ u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+ struct yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ /* Being scanned */
+
+ YAFFS_BLOCK_STATE_NEEDS_SCAN,
+ /* The block might have something on it (i.e. it is allocating or full,
+ * perhaps even empty) but it needs to be scanned to determine its true
+ * state.
+ * This state is only valid during scanning.
+ * NB We tolerate empty because the pre-scanner might be incapable of
+ * deciding.
+ * However, if this state is returned on a YAFFS2 device, then we expect
+ * a sequence number.
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these.
+ * If a block is only partially allocated at mount it is treated as
+ * full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ * If a block was only partially allocated when mounted we treat
+ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+};
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+ int soft_del_pages:10; /* number of soft deleted pages */
+ int pages_in_use:10; /* number of pages in use */
+ unsigned block_state:4; /* One of the above block states. */
+ /* NB use unsigned because enum is sometimes
+ * an int */
+ u32 needs_retiring:1; /* Data has failed on this block, */
+ /*need to get valid data off and retire*/
+ u32 skip_erased_check:1;/* Skip the erased check on this block */
+ u32 gc_prioritise:1; /* An ECC check or blank check has failed.
+ Block should be prioritised for GC */
+ u32 chunk_error_strikes:3; /* How many times we've had ecc etc
+ failures on this block and tried to reuse it */
+ u32 has_summary:1; /* The block has a summary */
+
+ u32 has_shrink_hdr:1; /* This block has at least one shrink header */
+ u32 seq_number; /* block sequence number for yaffs2 */
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+ enum yaffs_obj_type type;
+
+ /* Apply to everything */
+ int parent_obj_id;
+ u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to all object types except for hard links */
+ u32 yst_mode; /* protection */
+
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+
+ /* File size applies to files only */
+ u32 file_size_low;
+
+ /* Equivalent object id applies to hard links only. */
+ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ u32 yst_rdev; /* stuff for block and char devices (major/min) */
+
+ u32 win_ctime[2];
+ u32 win_atime[2];
+ u32 win_mtime[2];
+
+ u32 inband_shadowed_obj_id;
+ u32 inband_is_shrink;
+
+ u32 file_size_high;
+ u32 reserved[1];
+ int shadows_obj; /* This object header shadows the
+ specified object if > 0 */
+
+ /* is_shrink applies to object headers written when we make a hole. */
+ u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has child links).
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+ loff_t file_size;
+ loff_t scanned_size;
+ loff_t shrink_size;
+ int top_level;
+ struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+ struct list_head children; /* list of child links */
+ struct list_head dirty; /* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+ YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+ struct yaffs_obj *equiv_obj;
+ u32 equiv_id;
+};
+
+union yaffs_obj_var {
+ struct yaffs_file_var file_variant;
+ struct yaffs_dir_var dir_variant;
+ struct yaffs_symlink_var symlink_variant;
+ struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+ u8 deleted:1; /* This should only apply to unlinked files. */
+ u8 soft_del:1; /* it has also been soft deleted */
+ u8 unlinked:1; /* An unlinked file.*/
+ u8 fake:1; /* A fake object has no presence on NAND. */
+ u8 rename_allowed:1; /* Some objects cannot be renamed. */
+ u8 unlink_allowed:1;
+ u8 dirty:1; /* the object needs to be written to flash */
+ u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available
+ * ie. file data chunks encountered before
+ * the header.
+ */
+ u8 lazy_loaded:1; /* This object has been lazy loaded and
+ * is missing some detail */
+
+ u8 defered_free:1; /* Object is removed from NAND, but is
+ * still in the inode cache.
+ * Free of the object is deferred
+ * until the inode is released.
+ */
+ u8 being_created:1; /* This object is still being created
+ * so skip some verification checks. */
+ u8 is_shadowed:1; /* This object is shadowed on the way
+ * to being renamed. */
+
+ u8 xattr_known:1; /* We know if this object has xattribs
+ * or not. */
+ u8 has_xattr:1; /* This object has xattribs.
+ * Only valid if xattr_known. */
+
+ u8 serial; /* serial number of chunk in NAND.*/
+ u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_dev *my_dev; /* The device I'm on */
+
+ struct list_head hash_link; /* list of objects in hash bucket */
+
+ struct list_head hard_links; /* hard linked object chain*/
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_obj *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int hdr_chunk;
+
+ int n_data_chunks; /* Number of data chunks for this file. */
+
+ u32 obj_id; /* the object id value */
+
+ u32 yst_mode;
+
+ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+
+#ifdef CONFIG_YAFFS_WINCE
+ u32 win_ctime[2];
+ u32 win_mtime[2];
+ u32 win_atime[2];
+#else
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+#endif
+
+ u32 yst_rdev;
+
+ void *my_inode;
+
+ enum yaffs_obj_type variant_type;
+
+ union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+ struct list_head list;
+ int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+ int struct_type;
+ u32 obj_id;
+ u32 parent_id;
+ int hdr_chunk;
+ enum yaffs_obj_type variant_type:3;
+ u8 deleted:1;
+ u8 soft_del:1;
+ u8 unlinked:1;
+ u8 fake:1;
+ u8 rename_allowed:1;
+ u8 unlink_allowed:1;
+ u8 serial;
+ int n_data_chunks;
+ loff_t size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few.
+ */
+
+struct yaffs_buffer {
+ u8 *buffer;
+ int in_use;
+};
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+ const YCHAR *name;
+
+ /*
+ * Entry parameters set up way early. Yaffs sets up the rest.
+ * The structure should be zeroed out before use so that unused
+ * and default values are zero.
+ */
+
+ int inband_tags; /* Use inband tags */
+ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
+ be a power of 2 */
+ int chunks_per_block; /* does not need to be a power of 2 */
+ int spare_bytes_per_chunk; /* spare area size */
+ int start_block; /* Start block we're allowed to use */
+ int end_block; /* End block we're allowed to use */
+ int n_reserved_blocks; /* Tuneable so that we can reduce
+ * reserved blocks on NOR and RAM. */
+
+ int n_caches; /* If <= 0, then short op caching is disabled,
+ * else the number of short op caches.
+ */
+ int cache_bypass_aligned; /* If non-zero then bypass the cache for
+ * aligned writes.
+ */
+
+ int use_nand_ecc; /* Flag to decide whether or not to use
+ * NAND driver ECC on data (yaffs1) */
+ int tags_9bytes; /* Use 9 byte tags */
+ int no_tags_ecc; /* Flag to decide whether or not to do ECC
+ * on packed tags (yaffs2) */
+
+ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+ int refresh_period; /* How often to check for a block refresh */
+
+ /* Checkpoint control. Can be set before or after initialisation */
+ u8 skip_checkpt_rd;
+ u8 skip_checkpt_wr;
+
+ int enable_xattr; /* Enable xattribs */
+
+ int max_objects; /*
+ * Set to limit the number of objects created.
+ * 0 = no limit.
+ */
+
+ /* The remove_obj_fn function must be supplied by OS flavours that
+ * need it.
+ * yaffs direct uses it to implement the faster readdir.
+ * Linux uses it to protect the directory during unlocking.
+ */
+ void (*remove_obj_fn) (struct yaffs_obj *obj);
+
+ /* Callback to mark the superblock dirty */
+ void (*sb_dirty_fn) (struct yaffs_dev *dev);
+
+ /* Callback to control garbage collection. */
+ unsigned (*gc_control_fn) (struct yaffs_dev *dev);
+
+ /* Debug control flags. Don't use unless you know what you're doing */
+ int use_header_file_size; /* Flag to determine if we should use
+ * file sizes from the header */
+ int disable_lazy_load; /* Disable lazy loading on this device */
+ int wide_tnodes_disabled; /* Set to disable wide tnodes */
+ int disable_soft_del; /* yaffs 1 only: Set to disable the use of
+ * soft deletion. */
+
+ int defered_dir_update; /* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ int auto_unicode;
+#endif
+ int always_check_erased; /* Force chunk erased check always on */
+
+ int disable_summary;
+ int disable_bad_block_marking;
+
+};
+
+struct yaffs_driver {
+ int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, int data_len,
+ const u8 *oob, int oob_len);
+ int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, int data_len,
+ u8 *oob, int oob_len,
+ enum yaffs_ecc_result *ecc_result);
+ int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_initialise_fn) (struct yaffs_dev *dev);
+ int (*drv_deinitialise_fn) (struct yaffs_dev *dev);
+};
+
+struct yaffs_tags_handler {
+ int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags);
+ int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags);
+
+ int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+ int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+};
+
+struct yaffs_dev {
+ struct yaffs_param param;
+ struct yaffs_driver drv;
+ struct yaffs_tags_handler tagger;
+
+ /* Context storage. Holds extra OS specific data for this device */
+
+ void *os_context;
+ void *driver_context;
+
+ struct list_head dev_list;
+
+ int ll_init;
+ /* Runtime parameters. Set up by YAFFS. */
+ int data_bytes_per_chunk;
+
+ /* Non-wide tnode stuff */
+ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
+ * the tnodes are not wide enough.
+ */
+ u16 chunk_grp_size; /* == 2^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+ u32 tnode_width;
+ u32 tnode_mask;
+ u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+ u32 chunk_shift; /* Shift value */
+ u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */
+ u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+ int is_mounted;
+ int read_only;
+ int is_checkpointed;
+
+ /* Stuff to support block offsetting to support start block zero */
+ int internal_start_block;
+ int internal_end_block;
+ int block_offset;
+ int chunk_offset;
+
+ /* Runtime checkpointing stuff */
+ int checkpt_page_seq; /* running sequence number of checkpt pages */
+ int checkpt_byte_count;
+ int checkpt_byte_offs;
+ u8 *checkpt_buffer;
+ int checkpt_open_write;
+ int blocks_in_checkpt;
+ int checkpt_cur_chunk;
+ int checkpt_cur_block;
+ int checkpt_next_block;
+ int *checkpt_block_list;
+ int checkpt_max_blocks;
+ u32 checkpt_sum;
+ u32 checkpt_xor;
+
+ int checkpoint_blocks_required; /* Number of blocks needed to store
+ * current checkpoint set */
+
+ /* Block Info */
+ struct yaffs_block_info *block_info;
+ u8 *chunk_bits; /* bitmap of chunks in use */
+ unsigned block_info_alt:1; /* allocated using alternative alloc */
+ unsigned chunk_bits_alt:1; /* allocated using alternative alloc */
+ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
+ * Must be consistent with chunks_per_block.
+ */
+
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int alloc_block_finder; /* Used to search for next allocation block */
+
+ /* Object and Tnode memory management */
+ void *allocator;
+ int n_obj;
+ int n_tnodes;
+
+ int n_hardlinks;
+
+ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+ u32 bucket_finder;
+
+ int n_free_chunks;
+
+ /* Garbage collection control */
+ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
+ u32 n_clean_ups;
+
+ unsigned has_pending_prioritised_gc; /* We think this device might
+ have pending prioritised gcs */
+ unsigned gc_disable;
+ unsigned gc_block_finder;
+ unsigned gc_dirtiest;
+ unsigned gc_pages_in_use;
+ unsigned gc_not_done;
+ unsigned gc_block;
+ unsigned gc_chunk;
+ unsigned gc_skip;
+ struct yaffs_summary_tags *gc_sum_tags;
+
+ /* Special directories */
+ struct yaffs_obj *root_dir;
+ struct yaffs_obj *lost_n_found;
+
+ int buffered_block; /* Which block is buffered here? */
+ int doing_buffered_block_rewrite;
+
+ struct yaffs_cache *cache;
+ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files. */
+ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
+ files live. */
+ struct yaffs_obj *del_dir; /* Directory where deleted objects are
+ sent to disappear. */
+ struct yaffs_obj *unlinked_deletion; /* Current file being
+ background deleted. */
+ int n_deleted_files; /* Count of files awaiting deletion; */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+ int max_temp;
+ int temp_in_use;
+ int unmanaged_buffer_allocs;
+ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ allocating block */
+ unsigned oldest_dirty_seq;
+ unsigned oldest_dirty_block;
+
+ /* Block refreshing */
+ int refresh_skip; /* A skip down counter.
+ * Refresh happens when this gets to zero. */
+
+ /* Dirty directory handling */
+ struct list_head dirty_dirs; /* List of dirty directories */
+
+ /* Summary */
+ int chunks_per_summary;
+ struct yaffs_summary_tags *sum_tags;
+
+ /* Statistics */
+ u32 n_page_writes;
+ u32 n_page_reads;
+ u32 n_erasures;
+ u32 n_bad_markings;
+ u32 n_erase_failures;
+ u32 n_gc_copies;
+ u32 all_gcs;
+ u32 passive_gc_count;
+ u32 oldest_dirty_gc_count;
+ u32 n_gc_blocks;
+ u32 bg_gcs;
+ u32 n_retried_writes;
+ u32 n_retired_blocks;
+ u32 n_ecc_fixed;
+ u32 n_ecc_unfixed;
+ u32 n_tags_ecc_fixed;
+ u32 n_tags_ecc_unfixed;
+ u32 n_deletions;
+ u32 n_unmarked_deletions;
+ u32 refresh_count;
+ u32 cache_hits;
+ u32 tags_used;
+ u32 summary_used;
+
+};
+
+/* The CheckpointDevice structure holds the device information that changes
+ * at runtime and must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+ int struct_type;
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int n_free_chunks;
+
+ int n_deleted_files; /* Count of files awaiting deletion; */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ * allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+ int struct_type;
+ u32 magic;
+ u32 version;
+ u32 head;
+};
+
+struct yaffs_shadow_fixer {
+ int obj_id;
+ int shadowed_id;
+ struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+ int set; /* If 0 then this is a deletion */
+ const YCHAR *name;
+ const void *data;
+ int size;
+ int flags;
+ int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+ int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+ u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+ const YCHAR *name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
+ struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, const YCHAR *alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
+ const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
+ int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+int yaffs_guts_ll_init(struct yaffs_dev *dev);
+
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn);
+int yaffs_check_ff(u8 *buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR *str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
+ int force, int is_shrink, int shadows,
+ struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+
+int yaffs_format_dev(struct yaffs_dev *dev);
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out);
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
+loff_t yaffs_max_file_size(struct yaffs_dev *dev);
+
+/*
+ * Debug function to count the number of blocks in each state.
+ * NB Needs to be called with an array of 10 ints (bs[10]), one per state.
+ */
+
+void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10]);
+
+int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags);
+
+#endif
diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
new file mode 100644
index 000000000000..c20ab14b7fa2
--- /dev/null
+++ b/fs/yaffs2/yaffs_linux.h
@@ -0,0 +1,48 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+ struct list_head context_list; /* List of these we have mounted */
+ struct yaffs_dev *dev;
+ struct super_block *super;
+ struct task_struct *bg_thread; /* Background thread for this device */
+ int bg_running;
+ struct mutex gross_lock; /* Gross locking mutex */
+ u8 *spare_buffer; /* For mtdif2 use. Don't know the buffer size
+ * at compile time so we have to allocate it.
+ */
+ struct list_head search_contexts;
+ struct task_struct *readdir_process;
+ unsigned mount_id;
+ int dirty;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+#else
+#define WRITE_SIZE_STR "oobblock"
+#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+#endif
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 000000000000..4ff5741f3983
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,300 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/rawnand.h"
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+
+#include "uapi/linux/major.h"
+
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_linux.h"
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#define mtd_erase(m, ei) (m)->erase(m, ei)
+#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops)
+#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops)
+#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs)
+#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs)
+#endif
+
+
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ u32 addr =
+ ((loff_t) block_no) * dev->param.total_bytes_per_chunk *
+ dev->param.chunks_per_block;
+ struct erase_info ei;
+ int retval = 0;
+
+ ei.addr = addr;
+ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+
+ retval = mtd_erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+
+static int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, int data_len,
+ const u8 *oob, int oob_len)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ loff_t addr;
+ struct mtd_oob_ops ops;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_mtd_write(%p, %d, %p, %d, %p, %d)\n",
+ dev, nand_chunk, data, data_len, oob, oob_len);
+
+ if (!data || !data_len) {
+ data = NULL;
+ data_len = 0;
+ }
+
+ if (!oob || !oob_len) {
+ oob = NULL;
+ oob_len = 0;
+ }
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = (data) ? data_len : 0;
+ ops.ooblen = oob_len;
+ ops.datbuf = (u8 *)data;
+ ops.oobbuf = (u8 *)oob;
+
+ retval = mtd_write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, int data_len,
+ u8 *oob, int oob_len,
+ enum yaffs_ecc_result *ecc_result)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ loff_t addr;
+ struct mtd_oob_ops ops;
+ int retval;
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = (data) ? data_len : 0;
+ ops.ooblen = oob_len;
+ ops.datbuf = data;
+ ops.oobbuf = oob;
+
+#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
+ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+ */
+ ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+#endif
+ /* Read page and oob using MTD.
+ * Check status and determine ECC result.
+ */
+ retval = mtd_read_oob(mtd, addr, &ops);
+ if (retval)
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "read_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+
+ switch (retval) {
+ case 0:
+ /* no error */
+ if(ecc_result)
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ if(ecc_result)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ break;
+
+ case -EBADMSG:
+ default:
+ /* MTD's ECC could not fix the data */
+ dev->n_ecc_unfixed++;
+ if(ecc_result)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ return YAFFS_FAIL;
+ }
+
+ return YAFFS_OK;
+}
+
+static int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+ loff_t addr;
+ struct erase_info ei;
+ int retval = 0;
+ u32 block_size;
+
+ block_size = dev->param.total_bytes_per_chunk *
+ dev->param.chunks_per_block;
+ addr = ((loff_t) block_no) * block_size;
+
+ ei.addr = addr;
+ ei.len = block_size;
+
+ retval = mtd_erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no);
+
+ retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "checking block %d bad", block_no);
+
+ retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_initialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
+
+static int yaffs_mtd_deinitialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
+
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev)
+{
+ struct yaffs_driver *drv = &dev->drv;
+
+ drv->drv_write_chunk_fn = yaffs_mtd_write;
+ drv->drv_read_chunk_fn = yaffs_mtd_read;
+ drv->drv_erase_fn = yaffs_mtd_erase;
+ drv->drv_mark_bad_fn = yaffs_mtd_mark_bad;
+ drv->drv_check_bad_fn = yaffs_mtd_check_bad;
+ drv->drv_initialise_fn = yaffs_mtd_initialise;
+ drv->drv_deinitialise_fn = yaffs_mtd_deinitialise;
+}
+
+
+struct mtd_info * yaffs_get_mtd_device(dev_t sdev)
+{
+ struct mtd_info *mtd;
+
+ /* Look up the underlying MTD device from the minor number. */
+ mtd = get_mtd_device(NULL, MINOR(sdev));
+
+ /* Check it's an mtd device..... */
+ if (MAJOR(sdev) != MTD_BLOCK_MAJOR)
+ return NULL; /* This isn't an mtd device */
+
+ /* Check it's NAND */
+ if (mtd->type != MTD_NANDFLASH) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: MTD device is not NAND, it's type %d",
+ mtd->type);
+ return NULL;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+ yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+ yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+ yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size);
+#else
+ yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+#endif
+
+ return mtd;
+}
+
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
+{
+ if (yaffs_version == 2) {
+ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+ !inband_tags) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not have the right page sizes"
+ );
+ return -1;
+ }
+ } else {
+ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not have the right page sizes"
+ );
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+void yaffs_put_mtd_device(struct mtd_info *mtd)
+{
+ if(mtd)
+ put_mtd_device(mtd);
+}
diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 000000000000..9cff224c6dec
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,25 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev);
+struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
+void yaffs_put_mtd_device(struct mtd_info *mtd);
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
+#endif
diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
new file mode 100644
index 000000000000..4bdf4ed743a7
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,208 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number of
+ * values and fits into a small finite buffer.
+ *
+ * Each attribute is stored as a record:
+ * sizeof(int) bytes record size.
+ * strnlen+1 bytes name null terminated.
+ * nbytes value.
+ * ----------
+ * total size stored in record size
+ *
+ * This code has not been tested with unicode yet.
+ */
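+
+/* For example (hypothetical attribute): storing the name "user.foo" with a
+ * 4-byte value and a 4-byte int gives a record of 4 + 9 + 4 = 17 bytes,
+ * and that total (17) is what is stored in the record-size field.
+ */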
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR *name,
+ int *exist_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ if (!strncmp((YCHAR *) (xb + pos + sizeof(int)),
+ name, size)) {
+ if (exist_size)
+ *exist_size = size;
+ return pos;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ if (exist_size)
+ *exist_size = 0;
+ return -ENODATA;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR *name)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos < 0 || pos >= xb_size)
+ return -ENODATA;
+
+ /* Find size, shift rest over this record,
+ * then zero out the rest of buffer */
+ memcpy(&size, xb + pos, sizeof(int));
+ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+ memset(xb + (xb_size - size), 0, size);
+ return 0;
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
+ int bsize, int flags)
+{
+ int pos;
+ int namelen = strnlen(name, xb_size);
+ int reclen;
+ int size_exist = 0;
+ int space;
+ int start;
+
+ pos = nval_find(xb, xb_size, name, &size_exist);
+
+ if (flags & XATTR_CREATE && pos >= 0)
+ return -EEXIST;
+ if (flags & XATTR_REPLACE && pos < 0)
+ return -ENODATA;
+
+ start = nval_used(xb, xb_size);
+ space = xb_size - start + size_exist;
+
+ reclen = (sizeof(int) + namelen + 1 + bsize);
+
+ if (reclen > space)
+ return -ENOSPC;
+
+ if (pos >= 0) {
+ nval_del(xb, xb_size, name);
+ start = nval_used(xb, xb_size);
+ }
+
+ pos = start;
+
+ memcpy(xb + pos, &reclen, sizeof(int));
+ pos += sizeof(int);
+ strncpy((YCHAR *) (xb + pos), name, reclen);
+ pos += (namelen + 1);
+ memcpy(xb + pos, buf, bsize);
+ return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+
+ memcpy(&size, xb + pos, sizeof(int));
+ pos += sizeof(int); /* advance past record length */
+ size -= sizeof(int);
+
+ /* Advance over name string */
+ while (xb[pos] && size > 0 && pos < xb_size) {
+ pos++;
+ size--;
+ }
+ /*Advance over NUL */
+ pos++;
+ size--;
+
+ /* If bsize is zero then this is a size query.
+ * Return the size, but don't copy.
+ */
+ if (!bsize)
+ return size;
+
+ if (size <= bsize) {
+ memcpy(buf, xb + pos, size);
+ return size;
+ }
+ }
+ if (pos >= 0)
+ return -ERANGE;
+
+ return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+ int pos = 0;
+ int size;
+ int name_len;
+ int ncopied = 0;
+ int filled = 0;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > sizeof(int) &&
+ size <= xb_size &&
+ (pos + size) < xb_size &&
+ !filled) {
+ pos += sizeof(int);
+ size -= sizeof(int);
+ name_len = strnlen((YCHAR *) (xb + pos), size);
+ if (ncopied + name_len + 1 < bsize) {
+ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+ buf += name_len;
+ *buf = '\0';
+ buf++;
+ if (sizeof(YCHAR) > 1) {
+ *buf = '\0';
+ buf++;
+ }
+ ncopied += (name_len + 1);
+ } else {
+ filled = 1;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+ return nval_used(xb, xb_size) > 0;
+}
diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
new file mode 100644
index 000000000000..951e64f872b6
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 000000000000..0d8499bdc630
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,122 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_getblockinfo.h"
+#include "yaffs_summary.h"
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+ return chunk - dev->chunk_offset;
+}
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ struct yaffs_ext_tags local_tags;
+ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+ dev->n_page_reads++;
+
+ /* If there are no tags provided use local tags. */
+ if (!tags)
+ tags = &local_tags;
+
+ result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags);
+ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ struct yaffs_block_info *bi;
+ bi = yaffs_get_block_info(dev,
+ nand_chunk /
+ dev->param.chunks_per_block);
+ yaffs_handle_chunk_error(dev, bi);
+ }
+ return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+ dev->n_page_writes++;
+
+ if (!tags) {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ tags->seq_number = dev->seq_number;
+ tags->chunk_used = 1;
+ yaffs_trace(YAFFS_TRACE_WRITE,
+ "Writing chunk %d tags %d %d",
+ nand_chunk, tags->obj_id, tags->chunk_id);
+
+ result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk,
+ buffer, tags);
+
+ yaffs_summary_add(dev, tags, nand_chunk);
+
+ return result;
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ block_no -= dev->block_offset;
+ dev->n_bad_markings++;
+
+ if (dev->param.disable_bad_block_marking)
+ return YAFFS_OK;
+
+ return dev->tagger.mark_bad_fn(dev, block_no);
+}
+
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ block_no -= dev->block_offset;
+ return dev->tagger.query_block_fn(dev, block_no, state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ int result;
+
+ block_no -= dev->block_offset;
+ dev->n_erasures++;
+ result = dev->drv.drv_erase_fn(dev, block_no);
+ return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+ if (dev->drv.drv_initialise_fn)
+ return dev->drv.drv_initialise_fn(dev);
+ return YAFFS_OK;
+}
+
+int yaffs_deinit_nand(struct yaffs_dev *dev)
+{
+ if (dev->drv.drv_deinitialise_fn)
+ return dev->drv.drv_deinitialise_fn(dev);
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 000000000000..804e97ad66d0
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+int yaffs_deinit_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 000000000000..dd9a331d8fc4
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,56 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+static const u8 all_ff[20] = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t)
+{
+ pt->chunk_id = t->chunk_id;
+ pt->serial_number = t->serial_number;
+ pt->n_bytes = t->n_bytes;
+ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unused_stuff = 0;
+ pt->should_be_ff = 0xffffffff;
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt)
+{
+
+ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+ t->block_bad = 0;
+ if (pt->should_be_ff != 0xffffffff)
+ t->block_bad = 1;
+ t->chunk_used = 1;
+ t->obj_id = pt->obj_id;
+ t->chunk_id = pt->chunk_id;
+ t->n_bytes = pt->n_bytes;
+ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ t->is_deleted = (pt->deleted) ? 0 : 1;
+ t->serial_number = pt->serial_number;
+ } else {
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+ }
+}
diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 000000000000..b80f0a5b1574
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unused_stuff:1;
+ unsigned should_be_ff;
+
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 000000000000..e1d18cc33c68
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,197 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed up scanning.
+ * Its presence is indicated by the EXTRA_HEADER_INFO_FLAG being set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xf0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
+
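+/*
+ * A worked example (illustrative values only, not taken from the sources):
+ * an object-header chunk for a file whose parent has obj_id 2 and which is
+ * marked as a shrink header packs its chunk_id as
+ *
+ * EXTRA_HEADER_INFO_FLAG | EXTRA_SHRINK_FLAG | 2   == 0xc0000002
+ *
+ * and, assuming YAFFS_OBJECT_TYPE_FILE == 1, ORs 0x10000000 into obj_id.
+ * Unpacking reverses this with the masks above:
+ * parent_id = chunk_id & ~ALL_EXTRA_FLAGS and
+ * obj_type = obj_id >> EXTRA_OBJECT_TYPE_SHIFT.
+ */
+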
+static void yaffs_dump_packed_tags2_tags_only(
+ const struct yaffs_packed_tags2_tags_only *ptt)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "packed tags obj %d chunk %d byte %d seq %d",
+ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+ yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+ t->seq_number);
+
+}
+
+static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
+{
+ if (t->chunk_id != 0 || !t->extra_available)
+ return 0;
+
+ /* Check if the file size is too long to store */
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
+ (t->extra_file_size >> 31) != 0)
+ return 0;
+ return 1;
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+ const struct yaffs_ext_tags *t)
+{
+ ptt->chunk_id = t->chunk_id;
+ ptt->seq_number = t->seq_number;
+ ptt->n_bytes = t->n_bytes;
+ ptt->obj_id = t->obj_id;
+
+ /* Only store extra tags for object headers.
+ * If it is a file, only store the extra tags if the file size is
+ * short enough to fit.
+ */
+ if (yaffs_check_tags_extra_packable(t)) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunk_id */
+ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+ if (t->extra_is_shrink)
+ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+ if (t->extra_shadows)
+ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ ptt->n_bytes = t->extra_equiv_id;
+ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ ptt->n_bytes = (unsigned) t->extra_file_size;
+ else
+ ptt->n_bytes = 0;
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc)
+{
+ yaffs_pack_tags2_tags_only(&pt->t, t);
+
+ if (tags_ecc)
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *ptt)
+{
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+ if (ptt->seq_number == 0xffffffff)
+ return;
+
+ t->block_bad = 0;
+ t->chunk_used = 1;
+ t->obj_id = ptt->obj_id;
+ t->chunk_id = ptt->chunk_id;
+ t->n_bytes = ptt->n_bytes;
+ t->is_deleted = 0;
+ t->serial_number = 0;
+ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+ t->chunk_id = 0;
+ t->n_bytes = 0;
+
+ t->extra_available = 1;
+ t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+ t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
+ t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
+ t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ t->extra_equiv_id = ptt->n_bytes;
+ else
+ t->extra_file_size = ptt->n_bytes;
+ }
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc)
+{
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ if (pt->t.seq_number != 0xffffffff && tags_ecc) {
+ /* Chunk is in use and we need to do ECC */
+
+ struct yaffs_ecc_other ecc;
+ int result;
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &ecc);
+ result =
+ yaffs_ecc_correct_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc, &ecc);
+ switch (result) {
+ case 0:
+ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+ yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+ t->ecc_result = ecc_result;
+
+ yaffs_dump_packed_tags2(pt);
+ yaffs_dump_tags2(t);
+}
diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 000000000000..675e719460a8
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+ unsigned seq_number;
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+ struct yaffs_packed_tags2_tags_only t;
+ struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc);
+
+/* Only the tags part (no ECC), for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_summary.c b/fs/yaffs2/yaffs_summary.c
new file mode 100644
index 000000000000..6f3c7839fa6f
--- /dev/null
+++ b/fs/yaffs2/yaffs_summary.c
@@ -0,0 +1,313 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Summaries write the useful part of the tags for the chunks in a block into
+ * an array which is written to the last n chunks of the block.
+ * Reading the summaries gives all the tags for the block in one read, which
+ * is much faster.
+ *
+ * Chunks holding summaries are marked with tags making it look like
+ * they are part of a fake file.
+ *
+ * The summary could also be used during gc.
+ *
+ */
+
+#include "yaffs_summary.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_bitmap.h"
+
+/*
+ * The summary is built up in an array of summary tags.
+ * This gets written to the last one or two (maybe more) chunks in a block.
+ * A summary header is written as the first part of each chunk of summary data.
+ * The summary header must match or the summary is rejected.
+ */
+
+/* Summary tags don't need the sequence number because that is redundant. */
+struct yaffs_summary_tags {
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+/* Summary header */
+struct yaffs_summary_header {
+ unsigned version; /* Must match current version */
+ unsigned block; /* Must be this block */
+ unsigned seq; /* Must be this sequence number */
+ unsigned sum; /* Just add up all the bytes in the tags */
+};
+
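+/*
+ * A worked sizing example (assuming a common geometry of 2048-byte data
+ * chunks, 64 chunks per block and a 4-byte "unsigned"): each summary tag
+ * entry above is 12 bytes and each header 16 bytes, so yaffs_summary_init()
+ * below computes sum_bytes = 64 * 12 = 768, a summary chunk carries
+ * 2048 - 16 = 2032 bytes of payload, chunks_used rounds up to 1 and
+ * chunks_per_summary = 63; the whole summary fits in the block's last chunk.
+ */
+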
+
+static void yaffs_summary_clear(struct yaffs_dev *dev)
+{
+ if (!dev->sum_tags)
+ return;
+ memset(dev->sum_tags, 0, dev->chunks_per_summary *
+ sizeof(struct yaffs_summary_tags));
+}
+
+
+void yaffs_summary_deinit(struct yaffs_dev *dev)
+{
+ kfree(dev->sum_tags);
+ dev->sum_tags = NULL;
+ kfree(dev->gc_sum_tags);
+ dev->gc_sum_tags = NULL;
+ dev->chunks_per_summary = 0;
+}
+
+int yaffs_summary_init(struct yaffs_dev *dev)
+{
+ int sum_bytes;
+ int chunks_used; /* Number of chunks used by summary */
+ int sum_tags_bytes;
+
+ sum_bytes = dev->param.chunks_per_block *
+ sizeof(struct yaffs_summary_tags);
+
+ chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
+ (dev->data_bytes_per_chunk -
+ sizeof(struct yaffs_summary_header));
+
+ dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
+ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ if (!dev->sum_tags || !dev->gc_sum_tags) {
+ yaffs_summary_deinit(dev);
+ return YAFFS_FAIL;
+ }
+
+ yaffs_summary_clear(dev);
+
+ return YAFFS_OK;
+}
+
+static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
+{
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int i;
+ unsigned sum = 0;
+
+ i = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ while (i > 0) {
+ sum += *sum_buffer;
+ sum_buffer++;
+ i--;
+ }
+
+ return sum;
+}
+
+static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int n_bytes;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ memset(&tags, 0, sizeof(struct yaffs_ext_tags));
+ tags.obj_id = YAFFS_OBJECTID_SUMMARY;
+ tags.chunk_id = 1;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ hdr.version = YAFFS_SUMMARY_VERSION;
+ hdr.block = blk;
+ hdr.seq = bi->seq_number;
+ hdr.sum = yaffs_summary_sum(dev);
+
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ memcpy(buffer, &hdr, sizeof(hdr));
+ memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
+ tags.n_bytes = this_tx + sizeof(hdr);
+ result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (result != YAFFS_OK)
+ break;
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ dev->n_free_chunks--;
+
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ tags.chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+
+ if (result == YAFFS_OK)
+ bi->has_summary = 1;
+
+
+ return result;
+}
+
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)st;
+ int n_bytes;
+ int chunk_id;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+ int sum_tags_bytes;
+
+ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = blk * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ chunk_id = 1;
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (tags.chunk_id != chunk_id ||
+ tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
+ tags.chunk_used == 0 ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.n_bytes != (this_tx + sizeof(hdr)))
+ result = YAFFS_FAIL;
+ if (result != YAFFS_OK)
+ break;
+
+ if (st == dev->sum_tags) {
+ /* If we're scanning then update the block info */
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ }
+ memcpy(&hdr, buffer, sizeof(hdr));
+ memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+ if (result == YAFFS_OK) {
+ /* Verify header */
+ if (hdr.version != YAFFS_SUMMARY_VERSION ||
+ hdr.block != blk ||
+ hdr.seq != bi->seq_number ||
+ hdr.sum != yaffs_summary_sum(dev))
+ result = YAFFS_FAIL;
+ }
+
+ if (st == dev->sum_tags && result == YAFFS_OK)
+ bi->has_summary = 1;
+
+ return result;
+}
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_nand)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
+ int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
+
+ if (!dev->sum_tags)
+ return YAFFS_OK;
+
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ yaffs_pack_tags2_tags_only(&tags_only, tags);
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ sum_tags->chunk_id = tags_only.chunk_id;
+ sum_tags->n_bytes = tags_only.n_bytes;
+ sum_tags->obj_id = tags_only.obj_id;
+
+ if (chunk_in_block == dev->chunks_per_summary - 1) {
+ /* Time to write out the summary */
+ yaffs_summary_write(dev, block_in_nand);
+ yaffs_summary_clear(dev);
+ yaffs_skip_rest_of_block(dev);
+ }
+ }
+ return YAFFS_OK;
+}
+
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ tags_only.chunk_id = sum_tags->chunk_id;
+ tags_only.n_bytes = sum_tags->n_bytes;
+ tags_only.obj_id = sum_tags->obj_id;
+ yaffs_unpack_tags2_tags_only(tags, &tags_only);
+ return YAFFS_OK;
+ }
+ return YAFFS_FAIL;
+}
+
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int i;
+
+ if (!bi->has_summary)
+ return;
+
+ for (i = dev->chunks_per_summary;
+ i < dev->param.chunks_per_block;
+ i++) {
+ if (yaffs_check_chunk_bit(dev, blk, i)) {
+ yaffs_clear_chunk_bit(dev, blk, i);
+ bi->pages_in_use--;
+ dev->n_free_chunks++;
+ }
+ }
+}
diff --git a/fs/yaffs2/yaffs_summary.h b/fs/yaffs2/yaffs_summary.h
new file mode 100644
index 000000000000..be141d073369
--- /dev/null
+++ b/fs/yaffs2/yaffs_summary.h
@@ -0,0 +1,37 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_SUMMARY_H__
+#define __YAFFS_SUMMARY_H__
+
+#include "yaffs_packedtags2.h"
+
+
+int yaffs_summary_init(struct yaffs_dev *dev);
+void yaffs_summary_deinit(struct yaffs_dev *dev);
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_nand);
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block);
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk);
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
+
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 000000000000..092430beccb7
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,381 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations *********/
+
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+ /* Calculate an ecc */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j)
+ ecc ^= bit;
+ }
+ }
+ tags->ecc = ecc;
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recalculate the ECC */
+ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+ /* Weird ECC failure value */
+ /* TODO: Need to do something here */
+ return -1; /* unrecovered error */
+ }
+ return 0;
+}
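+
+/*
+ * A minimal self-test sketch (hypothetical helper, not part of the original
+ * sources) showing the single-bit recovery performed above: flip one bit of
+ * the packed tag bytes and let yaffs_check_tags_ecc() put it back.
+ */
+static int yaffs_tags_ecc_example(void)
+{
+ struct yaffs_tags tags;
+ unsigned char *b = ((union yaffs_tags_union *)&tags)->as_bytes;
+
+ memset(&tags, 0x5a, sizeof(tags)); /* arbitrary tag contents */
+ yaffs_calc_tags_ecc(&tags); /* compute and store the tags ECC */
+
+ b[3] ^= 0x10; /* inject a single-bit error */
+
+ /* The ECC mismatch identifies the flipped bit (position 29 here),
+ * which is repaired in place; a return value of 1 means "recovered".
+ */
+ return yaffs_check_tags_ecc(&tags) == 1;
+}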
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+ yaffs_calc_tags_ecc(tags_ptr);
+
+ spare_ptr->tb0 = tu->as_bytes[0];
+ spare_ptr->tb1 = tu->as_bytes[1];
+ spare_ptr->tb2 = tu->as_bytes[2];
+ spare_ptr->tb3 = tu->as_bytes[3];
+ spare_ptr->tb4 = tu->as_bytes[4];
+ spare_ptr->tb5 = tu->as_bytes[5];
+ spare_ptr->tb6 = tu->as_bytes[6];
+ spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+ struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+ int result;
+
+ tu->as_bytes[0] = spare_ptr->tb0;
+ tu->as_bytes[1] = spare_ptr->tb1;
+ tu->as_bytes[2] = spare_ptr->tb2;
+ tu->as_bytes[3] = spare_ptr->tb3;
+ tu->as_bytes[4] = spare_ptr->tb4;
+ tu->as_bytes[5] = spare_ptr->tb5;
+ tu->as_bytes[6] = spare_ptr->tb6;
+ tu->as_bytes[7] = spare_ptr->tb7;
+
+ result = yaffs_check_tags_ecc(tags_ptr);
+ if (result > 0)
+ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+ dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+ memset(spare, 0xff, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ struct yaffs_spare *spare)
+{
+ int data_size = dev->data_bytes_per_chunk;
+
+ return dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *) spare, sizeof(*spare));
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data,
+ struct yaffs_spare *spare,
+ enum yaffs_ecc_result *ecc_result,
+ int correct_errors)
+{
+ int ret_val;
+ struct yaffs_spare local_spare;
+ int data_size;
+ int spare_size;
+ int ecc_result1, ecc_result2;
+ u8 calc_ecc[3];
+
+ if (!spare) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &local_spare;
+ }
+ data_size = dev->data_bytes_per_chunk;
+ spare_size = sizeof(struct yaffs_spare);
+
+ if (dev->param.use_nand_ecc)
+ return dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *) spare, spare_size,
+ ecc_result);
+
+
+ /* Handle the ECC at this level. */
+
+ ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *)spare, spare_size,
+ NULL);
+ if (!data || !correct_errors)
+ return ret_val;
+
+ /* Do ECC correction if needed. */
+ yaffs_ecc_calc(data, calc_ecc);
+ ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+ yaffs_ecc_calc(&data[256], calc_ecc);
+ ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc);
+
+ if (ecc_result1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (ecc_result1 < 0 || ecc_result2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (ecc_result1 > 0 || ecc_result2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ return ret_val;
+}
+
+/*
+ * Functions for robustness handling
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+ yaffs_get_block_info(dev, flash_block + dev->block_offset)->
+ needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>>Block %d marked for retirement",
+ flash_block);
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
+static int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+
+ yaffs_spare_init(&spare);
+
+ if (ext_tags->is_deleted)
+ spare.page_status = 0;
+ else {
+ tags.obj_id = ext_tags->obj_id;
+ tags.chunk_id = ext_tags->chunk_id;
+
+ tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+ else
+ tags.n_bytes_msb = 3;
+
+ tags.serial_number = ext_tags->serial_number;
+
+ if (!dev->param.use_nand_ecc && data) {
+ yaffs_ecc_calc(data, spare.ecc1);
+ yaffs_ecc_calc(&data[256], spare.ecc2);
+ }
+
+ yaffs_load_tags_to_spare(&spare, &tags);
+ }
+ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
+
+static int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ int deleted;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ if (!yaffs_rd_chunk_nand(dev, nand_chunk,
+ data, &spare, &ecc_result, 1))
+ return YAFFS_FAIL;
+
+ /* ext_tags may be NULL */
+ if (!ext_tags)
+ return YAFFS_OK;
+
+ deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+ ext_tags->is_deleted = deleted;
+ ext_tags->ecc_result = ecc_result;
+ ext_tags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ ext_tags->chunk_used =
+ memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
+
+ if (ext_tags->chunk_used) {
+ yaffs_get_tags_from_spare(dev, &spare, &tags);
+ ext_tags->obj_id = tags.obj_id;
+ ext_tags->chunk_id = tags.chunk_id;
+ ext_tags->n_bytes = tags.n_bytes_lsb;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ ext_tags->n_bytes |=
+ (((unsigned)tags.n_bytes_msb) << 10);
+
+ ext_tags->serial_number = tags.serial_number;
+ }
+
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_spare spare;
+
+ memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+ spare.block_status = 'Y';
+
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ struct yaffs_spare spare0, spare1;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ enum yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ *seq_number = 0;
+
+ /* Look for bad block markers in the first two chunks */
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block,
+ NULL, &spare0, &dummy, 0);
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+ NULL, &spare1, &dummy, 0);
+
+ if (hweight8(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+
+ return YAFFS_OK;
+}
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev)
+{
+ if (dev->param.is_yaffs2)
+ return;
+ if (!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr;
+ if (!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd;
+ if (!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
+ if (!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
+}
diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 000000000000..92d298a6f582
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+
+#include "yaffs_guts.h"
+
+#if 0
+
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+
+#endif
+
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev);
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagsmarshall.c b/fs/yaffs2/yaffs_tagsmarshall.c
new file mode 100644
index 000000000000..44a83b12ca4d
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsmarshall.c
@@ -0,0 +1,199 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_packedtags2.h"
+
+static int yaffs_tags_marshall_write(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags2 pt;
+ int retval;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_tags_marshall_write chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+ if (!data || !tags)
+ BUG();
+ else if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)(data +
+ dev->
+ data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ }
+
+ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ (dev->param.inband_tags) ? NULL : packed_tags_ptr,
+ (dev->param.inband_tags) ? 0 : packed_tags_size);
+
+ return retval;
+}
+
+static int yaffs_tags_marshall_read(struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = 0;
+ int local_data = 0;
+ u8 spare_buffer[100];
+ enum yaffs_ecc_result ecc_result;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_tags_marshall_read chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (dev->param.inband_tags) {
+ if (!data) {
+ local_data = 1;
+ data = yaffs_get_temp_buffer(dev);
+ }
+ }
+
+ if (dev->param.inband_tags || (data && !tags))
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ NULL, 0,
+ &ecc_result);
+ else if (tags)
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ spare_buffer, packed_tags_size,
+ &ecc_result);
+ else
+ BUG();
+
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)
+ &data[dev->data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else if (tags) {
+ memcpy(packed_tags_ptr, spare_buffer, packed_tags_size);
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+
+ if (local_data)
+ yaffs_release_temp_buffer(dev, data);
+
+ if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ dev->n_ecc_unfixed++;
+ }
+
+ if (tags && ecc_result == YAFFS_ECC_RESULT_FIXED) {
+ if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR)
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ }
+
+ if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d",
+ block_no);
+
+ retval = dev->drv.drv_check_bad_fn(dev, block_no);
+
+ if (retval == YAFFS_FAIL) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *seq_number = 0;
+ } else {
+ struct yaffs_ext_tags t;
+
+ yaffs_tags_marshall_read(dev,
+ block_no * dev->param.chunks_per_block,
+ NULL, &t);
+
+ if (t.chunk_used) {
+ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else {
+ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block query returns seq %d state %d",
+ *seq_number, *state);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ return dev->drv.drv_mark_bad_fn(dev, block_no);
+
+}
+
+
+void yaffs_tags_marshall_install(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write;
+
+ if (!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read;
+
+ if (!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_marshall_query_block;
+
+ if (!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad;
+
+}
diff --git a/fs/yaffs2/yaffs_tagsmarshall.h b/fs/yaffs2/yaffs_tagsmarshall.h
new file mode 100644
index 000000000000..bf3e68a1a5be
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsmarshall.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSMARSHALL_H__
+#define __YAFFS_TAGSMARSHALL_H__
+
+#include "yaffs_guts.h"
+void yaffs_tags_marshall_install(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
new file mode 100644
index 000000000000..fd26054d3912
--- /dev/null
+++ b/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000f0000
+
+#define YAFFS_TRACE_SYNC 0x00100000
+#define YAFFS_TRACE_BACKGROUND 0x00200000
+#define YAFFS_TRACE_LOCK 0x00400000
+#define YAFFS_TRACE_MOUNT 0x00800000
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xf0000000
+
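+/*
+ * A small usage sketch (values from the defines above): yaffs_trace_mask is
+ * an OR of these bits, so enabling scan and bad-block tracing on top of the
+ * always-on band is
+ *
+ * yaffs_trace_mask = YAFFS_TRACE_SCAN | YAFFS_TRACE_BAD_BLOCKS |
+ * YAFFS_TRACE_ALWAYS;   (== 0xf0000018)
+ *
+ * yaffs_vfs.c exposes the mask as a writable module parameter, so it can
+ * also be changed at run time.
+ */
+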
+#endif
diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
new file mode 100644
index 000000000000..e8f2f0a6c2be
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,529 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask &
+ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char * const block_state_name[] = {
+ "Unknown",
+ "Needs scan",
+ "Scanning",
+ "Empty",
+ "Allocating",
+ "Full",
+ "Dirty",
+ "Checkpoint",
+ "Collecting",
+ "Dead"
+};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+ int actually_used;
+ int in_use;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has undefined state %d",
+ n, bi->block_state);
+
+ switch (bi->block_state) {
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCAN:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has bad run-state %s",
+ n, block_state_name[bi->block_state]);
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->pages_in_use < 0 ||
+ bi->pages_in_use > dev->param.chunks_per_block ||
+ bi->soft_del_pages < 0 ||
+ bi->soft_del_pages > dev->param.chunks_per_block ||
+ actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has illegal values pages_in_used %d soft_del_pages %d",
+ n, bi->pages_in_use, bi->soft_del_pages);
+
+ /* Check chunk bitmap legal */
+ in_use = yaffs_count_chunk_bits(dev, n);
+ if (in_use != bi->pages_in_use)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+ n, bi->pages_in_use, in_use);
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n)
+{
+ yaffs_verify_blk(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+
+ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Block %d is in state %d after gc, should be erased",
+ n, bi->block_state);
+ }
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+ int i;
+ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int illegal_states = 0;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ memset(state_count, 0, sizeof(state_count));
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ yaffs_verify_blk(dev, bi, i);
+
+ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+ state_count[bi->block_state]++;
+ else
+ illegal_states++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%d blocks have illegal states",
+ illegal_states);
+ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many allocating blocks");
+
+ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%s %d blocks",
+ block_state_name[i], state_count[i]);
+
+ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Checkpoint block count wrong dev %d count %d",
+ dev->blocks_in_checkpt,
+ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Erased block count wrong dev %d count %d",
+ dev->n_erased_blocks,
+ state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many collecting blocks %d (max is 1)",
+ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in
+ * which case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!(tags && obj && oh)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Verifying object header tags %p obj %p oh %p",
+ tags, obj, oh);
+ return;
+ }
+
+ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+ oh->type > YAFFS_OBJECT_TYPE_MAX)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header type is illegal value 0x%x",
+ tags->obj_id, oh->type);
+
+ if (tags->obj_id != obj->obj_id)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch obj_id %d",
+ tags->obj_id, obj->obj_id);
+
+ /*
+ * Check that the object's parent ids match if parent_check requested.
+ *
+ * Tests do not apply to the root object.
+ */
+
+ if (parent_check && tags->obj_id > 1 && !obj->parent)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d obj->parent is NULL",
+ tags->obj_id, oh->parent_obj_id);
+
+ if (parent_check && obj->parent &&
+ oh->parent_obj_id != obj->parent->obj_id &&
+ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d parent_obj_id %d",
+ tags->obj_id, oh->parent_obj_id,
+ obj->parent->obj_id);
+
+ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is NULL",
+ obj->obj_id);
+
+ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Junk name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is 0xff",
+ obj->obj_id);
+}
+
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+ u32 x;
+ int required_depth;
+ int actual_depth;
+ int last_chunk;
+ u32 offset_in_chunk;
+ u32 the_chunk;
+
+ u32 i;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ struct yaffs_tnode *tn;
+ u32 obj_id;
+
+ if (!obj)
+ return;
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ dev = obj->my_dev;
+ obj_id = obj->obj_id;
+
+
+ /* Check file size is consistent with tnode depth */
+ yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
+ &last_chunk, &offset_in_chunk);
+ last_chunk++;
+ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
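+
+ /*
+ * Worked example (assuming the usual YAFFS_TNODES_LEVEL0_BITS == 4 and
+ * YAFFS_TNODES_INTERNAL_BITS == 3): a 1 MiB file on 2048-byte chunks
+ * gives last_chunk == 513, x == 513 >> 4 == 32, and the loop above runs
+ * twice (32 -> 4 -> 0), so required_depth == 2.
+ */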
+
+ actual_depth = obj->variant.file_variant.top_level;
+
+ /* Check that the chunks in the tnode tree are all correct.
+ * We do this by scanning through the tnode tree and
+ * checking the tags for every chunk match.
+ */
+
+ if (yaffs_skip_nand_verification(dev))
+ return;
+
+ for (i = 1; i <= last_chunk; i++) {
+ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+ if (!tn)
+ continue;
+
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk > 0) {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ &tags);
+ if (tags.obj_id != obj_id || tags.chunk_id != i)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+ obj_id, i, the_chunk,
+ tags.obj_id, tags.chunk_id);
+ }
+ }
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+ u32 chunk_min;
+ u32 chunk_max;
+ u32 chunk_id_ok;
+ u32 chunk_in_range;
+ u32 chunk_wrongly_deleted;
+ u32 chunk_valid;
+
+ if (!obj)
+ return;
+
+ if (obj->being_created)
+ return;
+
+ dev = obj->my_dev;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+ chunk_max =
+ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+ chunk_valid = chunk_in_range &&
+ yaffs_check_chunk_bit(dev,
+ obj->hdr_chunk / dev->param.chunks_per_block,
+ obj->hdr_chunk % dev->param.chunks_per_block);
+ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has chunk_id %d %s %s",
+ obj->obj_id, obj->hdr_chunk,
+ chunk_id_ok ? "" : ",out of range",
+ chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj_hdr *oh;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+ yaffs_verify_oh(obj, oh, &tags, 1);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+
+ /* Verify it has a parent */
+ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has parent pointer %p which does not look like an object",
+ obj->obj_id, obj->parent);
+ }
+
+ /* Verify parent is a directory */
+ if (obj->parent &&
+ obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d's parent is not a directory (type %d)",
+ obj->obj_id, obj->parent->variant_type);
+ }
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_verify_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_verify_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_verify_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_verify_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_verify_special(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has illegaltype %d",
+ obj->obj_id, obj->variant_type);
+ break;
+ }
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int i;
+ struct list_head *lh;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ yaffs_verify_obj(obj);
+ }
+ }
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+ int count = 0;
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!obj->parent) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
+ BUG();
+ return;
+ }
+
+ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ yaffs_verify_obj(list_obj);
+ if (obj == list_obj)
+ count++;
+ }
+
+ if (count != 1) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory %d times",
+ count);
+ BUG();
+ }
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ if (!directory) {
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_full_verification(directory->my_dev))
+ return;
+
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Directory has wrong type: %d",
+ directory->variant_type);
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &directory->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (list_obj->parent != directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory list has wrong parent %p",
+ list_obj->parent);
+ BUG();
+ }
+ yaffs_verify_obj_in_dir(list_obj);
+ }
+}
+
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+ int counted;
+ int difference;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ counted = yaffs_count_free_chunks(dev);
+
+ difference = dev->n_free_chunks - counted;
+
+ if (difference) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Freechunks verification failure %d %d %d",
+ dev->n_free_chunks, counted, difference);
+ yaffs_free_verification_failures++;
+ }
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+ (void) in;
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
new file mode 100644
index 000000000000..4f4af8d29aa4
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+ int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
new file mode 100644
index 000000000000..61ac4e4c45d1
--- /dev/null
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -0,0 +1,3676 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ * this superblock
+ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
+ * superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
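+
+/*
+ * A minimal sketch of the private-pointer round trip described above
+ * (i_private being the 2.6.19+ name for what older kernels called
+ * u.generic_ip):
+ *
+ * struct yaffs_dev *dev = yaffs_super_to_dev(sb);    (i.e. sb->s_fs_info)
+ * struct yaffs_obj *obj = yaffs_inode_to_obj(inode); (i.e. inode->i_private)
+ *
+ * Both helpers are defined as macros further down in this file.
+ */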
+
+/*
+ * There are two variants of the VFS glue code. This variant should compile
+ * for any version of Linux.
+ */
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
+#define YAFFS_COMPILE_BACKGROUND
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23))
+#define YAFFS_COMPILE_FREEZER
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#define YAFFS_COMPILE_EXPORTFS
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_USE_SETATTR_COPY
+#define YAFFS_USE_TRUNCATE_SETSIZE
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_HAS_EVICT_INODE
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+#define YAFFS_NEW_FOLLOW_LINK 1
+#else
+#define YAFFS_NEW_FOLLOW_LINK 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_HAS_WRITE_SUPER
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+#include <linux/smp_lock.h>
+#endif
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+#include <linux/namei.h>
+#endif
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+#include <linux/exportfs.h>
+#endif
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#endif
+#ifdef YAFFS_COMPILE_FREEZER
+#include <linux/freezer.h>
+#endif
+
+#include <asm/div64.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+/* FIXME: use sb->s_id instead ? */
+#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+
+#else
+
+#include <linux/locks.h>
+#define BDEVNAME_SIZE 0
+#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+#define __user
+#endif
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define YPROC_ROOT (&proc_root)
+#else
+#define YPROC_ROOT NULL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define Y_INIT_TIMER(a) init_timer(a)
+#else
+#define Y_INIT_TIMER(a) timer_setup_on_stack(a, NULL, 0)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
+#define YAFFS_USE_WRITE_BEGIN_END 1
+#else
+#define YAFFS_USE_WRITE_BEGIN_END 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_SUPER_HAS_DIRTY
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while(0)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+ uint64_t result = partition_size;
+ do_div(result, block_size);
+ return (uint32_t) result;
+}
+#else
+#define YCALCBLOCKS(s, b) ((s)/(b))
+#endif
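+/*
+ * Worked example (illustrative, not part of the driver): for a 64 MiB
+ * partition with 128 KiB erase blocks, YCALCBLOCKS(64 * 1024 * 1024,
+ * 128 * 1024) yields 512 blocks.  do_div() is used on newer kernels so
+ * that the 64-bit division does not pull in libgcc helpers on 32-bit
+ * builds.
+ */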
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+#include <linux/cred.h>
+
+#include <uapi/linux/mount.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_getblockinfo.h"
+
+unsigned int yaffs_trace_mask =
+ YAFFS_TRACE_BAD_BLOCKS |
+ YAFFS_TRACE_ALWAYS |
+ 0;
+
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+unsigned int yaffs_auto_select = 1;
+/* Module Parameters */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+#else
+MODULE_PARM(yaffs_trace_mask, "i");
+MODULE_PARM(yaffs_wr_attempts, "i");
+MODULE_PARM(yaffs_auto_checkpoint, "i");
+MODULE_PARM(yaffs_gc_control, "i");
+#endif
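+/*
+ * Usage note (illustrative, not part of the original patch): with
+ * module_param() the 0644 permissions expose these knobs under
+ * /sys/module/<module>/parameters/, e.g.
+ *
+ *	echo 0xffff > /sys/module/yaffs2/parameters/yaffs_trace_mask
+ *
+ * and they can also be passed as options at modprobe time.  The module
+ * name "yaffs2" is an assumption here; it depends on how the objects
+ * are linked in the Makefile.
+ */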
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+/* use iget and read_inode */
+#define Y_IGET(sb, inum) iget((sb), (inum))
+
+#else
+/* Call local equivalent */
+#define YAFFS_USE_OWN_IGET
+#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
+
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#else
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip)
+#endif
+
+#define yaffs_inode_to_obj(iptr) \
+ ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
+#else
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->u.generic_sbp)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define Y_CLEAR_INODE(i) clear_inode(i)
+#else
+#define Y_CLEAR_INODE(i) end_writeback(i)
+#endif
+
+
+#define update_dir_time(dir) do {\
+ (dir)->i_ctime = (dir)->i_mtime = current_time(dir); \
+ } while (0)
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj);
+
+
+static void yaffs_gross_lock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
+ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
+}
+
+static void yaffs_gross_unlock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
+ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
+}
+
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+ /* Lifted from jffs2 */
+
+ struct yaffs_obj *obj;
+ unsigned char *pg_buf;
+ int ret;
+ loff_t pos = ((loff_t) pg->index) << PAGE_SHIFT;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readpage_nolock at %lld, size %08x",
+ (long long)pos,
+ (unsigned)PAGE_SIZE);
+
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
+
+ dev = obj->my_dev;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ BUG_ON(!PageLocked(pg));
+#else
+ if (!PageLocked(pg))
+ PAGE_BUG(pg);
+#endif
+
+ pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
+
+ yaffs_gross_lock(dev);
+
+ ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_SIZE);
+
+ yaffs_gross_unlock(dev);
+
+ if (ret >= 0)
+ ret = 0;
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
+ }
+
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+ return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+ int ret = yaffs_readpage_nolock(f, pg);
+ UnlockPage(pg);
+ return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+ int ret;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+ ret = yaffs_readpage_unlock(f, pg);
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+ return ret;
+}
+
+
+static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
+{
+ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+ if (lc)
+ lc->dirty = val;
+
+#ifdef YAFFS_SUPER_HAS_DIRTY
+ {
+ struct super_block *sb = lc->super;
+
+ if (sb)
+ sb->s_dirt = val;
+ }
+#endif
+
+}
+
+static void yaffs_set_super_dirty(struct yaffs_dev *dev)
+{
+ yaffs_set_super_dirty_val(dev, 1);
+}
+
+static void yaffs_clear_super_dirty(struct yaffs_dev *dev)
+{
+ yaffs_set_super_dirty_val(dev, 0);
+}
+
+static int yaffs_check_super_dirty(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+ if (lc && lc->dirty)
+ return 1;
+
+#ifdef YAFFS_SUPER_HAS_DIRTY
+ {
+ struct super_block *sb = lc->super;
+
+ if (sb && sb->s_dirt)
+ return 1;
+ }
+#endif
+ return 0;
+
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+#else
+static int yaffs_writepage(struct page *page)
+#endif
+{
+ struct yaffs_dev *dev;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ unsigned long end_index;
+ char *buffer;
+ struct yaffs_obj *obj;
+ int n_written = 0;
+ unsigned n_bytes;
+ loff_t i_size;
+
+ if (!mapping)
+ BUG();
+ inode = mapping->host;
+ if (!inode)
+ BUG();
+ i_size = i_size_read(inode);
+
+ end_index = i_size >> PAGE_SHIFT;
+
+ if (page->index < end_index)
+ n_bytes = PAGE_SIZE;
+ else {
+ n_bytes = i_size & (PAGE_SIZE - 1);
+
+ if (page->index > end_index || !n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %lld, inode size = %lld!!",
+ ((loff_t)page->index) << PAGE_SHIFT,
+ inode->i_size);
+ yaffs_trace(YAFFS_TRACE_OS,
+ " -> don't care!!");
+
+ zero_user_segment(page, 0, PAGE_SIZE);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ return 0;
+ }
+ }
+
+ if (n_bytes != PAGE_SIZE)
+ zero_user_segment(page, n_bytes, PAGE_SIZE);
+
+ get_page(page);
+
+ buffer = kmap(page);
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %lld, size %08x",
+ ((loff_t)page->index) << PAGE_SHIFT, n_bytes);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag0: obj = %lld, ino = %lld",
+ obj->variant.file_variant.file_size, inode->i_size);
+
+ n_written = yaffs_wr_file(obj, buffer,
+ ((loff_t)page->index) << PAGE_SHIFT, n_bytes, 0);
+
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag1: obj = %lld, ino = %lld",
+ obj->variant.file_variant.file_size, inode->i_size);
+
+ yaffs_gross_unlock(dev);
+
+ kunmap(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ put_page(page);
+
+ return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/* Space holding and freeing is done to ensure we have space available
+ * for write_begin/end.
+ * For now we just assume few parallel writes and check against a small
+ * number.
+ * Todo: need to do this with a counter to handle parallel reads better.
+ */
+
+static ssize_t yaffs_hold_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ int n_free_chunks;
+
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ n_free_chunks = yaffs_get_n_free_chunks(dev);
+
+ yaffs_gross_unlock(dev);
+
+ return (n_free_chunks > 20) ? 1 : 0;
+}
+
+static void yaffs_release_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_gross_unlock(dev);
+}
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct page *pg = NULL;
+ pgoff_t index = pos >> PAGE_SHIFT;
+
+ int ret = 0;
+ int space_held = 0;
+
+ /* Get a page */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+#else
+ pg = __grab_cache_page(mapping, index);
+#endif
+
+ *pagep = pg;
+ if (!pg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "start yaffs_write_begin index %d(%x) uptodate %d",
+ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+ /* Get fs space */
+ space_held = yaffs_hold_space(filp);
+
+ if (!space_held) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Update page if required */
+
+ if (!Page_Uptodate(pg))
+ ret = yaffs_readpage_nolock(filp, pg);
+
+ if (ret)
+ goto out;
+
+ /* Happy path return */
+ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+ return 0;
+
+out:
+ yaffs_trace(YAFFS_TRACE_OS,
+ "end yaffs_write_begin fail returning %d", ret);
+ if (space_held)
+ yaffs_release_space(filp);
+ if (pg) {
+ unlock_page(pg);
+ put_page(pg);
+ }
+ return ret;
+}
+
+#else
+
+static int yaffs_prepare_write(struct file *f, struct page *pg,
+ unsigned offset, unsigned to)
+{
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepare_write");
+
+ if (!Page_Uptodate(pg))
+ return yaffs_readpage_nolock(f, pg);
+ return 0;
+}
+#endif
+
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos)
+{
+ struct yaffs_obj *obj;
+ int n_written;
+ loff_t ipos;
+ struct inode *inode;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: hey obj is null!");
+ return -EINVAL;
+ }
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ inode = f->f_path.dentry->d_inode;
+
+ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+ else
+ ipos = *pos;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_file_write about to write %u(%x) bytes to object %d at %lld",
+ (unsigned)n, (unsigned)n, obj->obj_id, ipos);
+
+ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: %d(%x) bytes written",
+ (unsigned)n, (unsigned)n);
+
+ if (n_written > 0) {
+ ipos += n_written;
+ *pos = ipos;
+ if (ipos > inode->i_size) {
+ inode->i_size = ipos;
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write size updated to %lld bytes, %d blocks",
+ ipos, (int)(inode->i_blocks));
+ }
+
+ }
+ yaffs_gross_unlock(dev);
+ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *pg, void *fsdadata)
+{
+ int ret = 0;
+ void *addr, *kva;
+ uint32_t offset_into_page = pos & (PAGE_SIZE - 1);
+
+ kva = kmap(pg);
+ addr = kva + offset_into_page;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end addr %p pos %lld n_bytes %d",
+ addr, pos, copied);
+
+ ret = yaffs_file_write(filp, addr, copied, &pos);
+
+ if (ret != copied) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end not same size ret %d copied %d",
+ ret, copied);
+ SetPageError(pg);
+ }
+
+ kunmap(pg);
+
+ yaffs_release_space(filp);
+ unlock_page(pg);
+ put_page(pg);
+ return ret;
+}
+#else
+
+static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+ unsigned to)
+{
+ void *addr, *kva;
+
+ loff_t pos = (((loff_t) pg->index) << PAGE_SHIFT) + offset;
+ int n_bytes = to - offset;
+ int n_written;
+
+ kva = kmap(pg);
+ addr = kva + offset;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write addr %p pos %lld n_bytes %d",
+ addr, pos, n_bytes);
+
+ n_written = yaffs_file_write(f, addr, n_bytes, &pos);
+
+ if (n_written != n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write not same size n_written %d n_bytes %d",
+ n_written, n_bytes);
+ SetPageError(pg);
+ }
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write returning %d",
+ n_written == n_bytes ? 0 : n_written);
+
+ return n_written == n_bytes ? 0 : n_written;
+}
+#endif
+
+static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+ .write_begin = yaffs_write_begin,
+ .write_end = yaffs_write_end,
+#else
+ .prepare_write = yaffs_prepare_write,
+ .commit_write = yaffs_commit_write,
+#endif
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid())
+#define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid())
+#else
+#define YCRED_FSUID() YCRED(current)->fsuid
+#define YCRED_FSGID() YCRED(current)->fsgid
+
+static inline uid_t i_uid_read(const struct inode *inode)
+{
+ return inode->i_uid;
+}
+
+static inline gid_t i_gid_read(const struct inode *inode)
+{
+ return inode->i_gid;
+}
+
+static inline void i_uid_write(struct inode *inode, uid_t uid)
+{
+ inode->i_uid = uid;
+}
+
+static inline void i_gid_write(struct inode *inode, gid_t gid)
+{
+ inode->i_gid = gid;
+}
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+#else
+static int yaffs_file_flush(struct file *file)
+#endif
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_path.dentry);
+
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_flush object %d (%s)",
+ obj->obj_id,
+ obj->dirty ? "dirty" : "clean");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_file(obj, 1, 0);
+
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+static int yaffs_sync_object(struct file *file, int datasync)
+#else
+static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+ int datasync)
+#endif
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+ struct dentry *dentry = file->f_path.dentry;
+#endif
+
+ obj = yaffs_dentry_to_obj(dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_object");
+ yaffs_gross_lock(dev);
+ yaffs_flush_file(obj, 1, datasync);
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+static const struct file_operations yaffs_file_operations = {
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .sendfile = generic_file_sendfile,
+};
+
+#else
+
+static const struct file_operations yaffs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ .sendfile = generic_file_sendfile,
+#endif
+};
+#endif
+
+
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+static void zero_user_segment(struct page *page, unsigned start, unsigned end)
+{
+ void *kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr + start, 0, end - start);
+ kunmap_atomic(kaddr, KM_USER0);
+ flush_dcache_page(page);
+}
+#endif
+
+
+static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
+{
+#ifdef YAFFS_USE_TRUNCATE_SETSIZE
+ truncate_setsize(inode, newsize);
+ return 0;
+#else
+ truncate_inode_pages(&inode->i_data, newsize);
+ return 0;
+#endif
+
+}
+
+
+static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
+{
+#ifdef YAFFS_USE_SETATTR_COPY
+ setattr_copy(inode, attr);
+ return 0;
+#else
+ return inode_setattr(inode, attr);
+#endif
+
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_setattr of object %d",
+ yaffs_inode_to_obj(inode)->obj_id);
+#if 0
+ /* Fail if a requested resize >= 2GB */
+ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
+ error = -EINVAL;
+#endif
+
+ if (error == 0)
+ error = setattr_prepare(dentry, attr);
+ if (error == 0) {
+ int result;
+ if (!error) {
+ error = yaffs_vfs_setattr(inode, attr);
+ yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
+ if (attr->ia_valid & ATTR_SIZE) {
+ yaffs_vfs_setsize(inode, attr->ia_size);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+ }
+ }
+ dev = yaffs_inode_to_obj(inode)->my_dev;
+ if (attr->ia_valid & ATTR_SIZE) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "resize to %d(%x)",
+ (int)(attr->ia_size),
+ (int)(attr->ia_size));
+ }
+ yaffs_gross_lock(dev);
+ result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
+ if (result == YAFFS_OK) {
+ error = 0;
+ } else {
+ error = -EPERM;
+ }
+ yaffs_gross_unlock(dev);
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
+
+ return error;
+}
+
+static int yaffs_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
+{
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
+
+	/* Currently we don't support POSIX ACLs, so never accept any setting
+	 * whose name starts with "system.posix_acl_".
+	 */
+	if (strncmp(name, "system.posix_acl_", 17) == 0)
+		error = -EOPNOTSUPP;
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_set_xattrib(obj, name, value, size, flags);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_getxattr(struct dentry * dentry, struct inode *inode,
+ const char *name, void *buff, size_t size)
+{
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_getxattr \"%s\" from object %d",
+ name, obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_get_xattrib(obj, name, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
+
+ return error;
+}
+
+static int yaffs_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_remove_xattrib(obj, name);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_list_xattrib(obj, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr done returning %d", error);
+
+ return error;
+}
+
+
+static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+ .listxattr = yaffs_listxattr,
+};
+
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen)
+{
+ unsigned char *alias;
+ int ret;
+
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+
+ yaffs_gross_unlock(dev);
+
+ if (!alias)
+ return -ENOMEM;
+
+ ret = readlink_copy(buffer, buflen, alias);
+ kfree(alias);
+ return ret;
+}
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+static const char *yaffs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ void *ret;
+#else
+static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	int ret;
+#endif
+ unsigned char *alias;
+ int ret_int = 0;
+ struct yaffs_dev *dev;
+
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ dev = yaffs_dentry_to_obj(dentry)->my_dev;
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+ yaffs_gross_unlock(dev);
+
+ if (!alias) {
+ ret_int = -ENOMEM;
+ goto out;
+ }
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+ set_delayed_call(done, kfree_link, alias);
+ ret = alias;
+out:
+ if (ret_int)
+ ret = ERR_PTR(ret_int);
+ return ret;
+#else
+ ret = vfs_follow_link(nd, alias);
+ kfree(alias);
+out:
+ if (ret_int)
+ ret = ret_int;
+ return ret;
+#endif
+}
+
+
+#ifdef YAFFS_HAS_PUT_INODE
+
+/* For now put inode is just for debugging
+ * Put inode is called when the inode **structure** is put.
+ */
+static void yaffs_put_inode(struct inode *inode)
+{
+ yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_put_inode: ino %d, count %d",
+		(int)inode->i_ino, atomic_read(&inode->i_count));
+
+}
+#endif
+
+static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+ .get_link = yaffs_get_link,
+#else
+ .follow_link = yaffs_follow_link,
+#endif
+ .setattr = yaffs_setattr,
+ .listxattr = yaffs_listxattr,
+};
+
+#ifdef YAFFS_USE_OWN_IGET
+
+static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ yaffs_gross_unlock(dev);
+
+ unlock_new_inode(inode);
+ return inode;
+}
+
+#else
+
+static void yaffs_read_inode(struct inode *inode)
+{
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_inode for %d", (int)inode->i_ino);
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+}
+
+#endif
+
+
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+ struct yaffs_obj *obj)
+{
+ struct inode *inode;
+
+ if (!sb) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL super_block!!");
+ return NULL;
+
+ }
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL object!!");
+ return NULL;
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for object %d", obj->obj_id);
+
+ inode = Y_IGET(sb, obj->obj_id);
+ if (IS_ERR(inode))
+ return NULL;
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+ /* NB You can't be holding gross_lock or deadlock will happen! */
+
+ return inode;
+}
+
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+#define YCRED(x) x
+#else
+#define YCRED(x) (x->cred)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t rdev)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+#else
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ int rdev)
+#endif
+{
+ struct inode *inode;
+
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_dev *dev;
+
+ struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+ int error = -ENOSPC;
+
+ uid_t uid = YCRED_FSUID();
+ gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
+
+ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: parent object %d type %d",
+ parent->obj_id, parent->variant_type);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: could not get parent object");
+ return -EPERM;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_mknod: making object for %s, mode %x dev %x",
+ dentry->d_name.name, mode, rdev);
+
+ dev = parent->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ obj =
+ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+ gid, old_encode_dev(rdev));
+#else
+ obj =
+ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+ gid, rdev);
+#endif
+ break;
+ case S_IFREG: /* file */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+ gid);
+ break;
+ case S_IFDIR: /* directory */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+
+ /* Can not call yaffs_get_inode() with gross lock held */
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod created object %d count = %d",
+ obj->obj_id, atomic_read(&inode->i_count));
+ error = 0;
+ yaffs_fill_inode_from_obj(dir, parent);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+ error = -ENOMEM;
+ }
+
+ return error;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+#else
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+ int ret_val;
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir");
+ ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+ return ret_val;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool dummy)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ struct nameidata *n)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+#else
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_create");
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int dummy)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n)
+#else
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+#endif
+{
+ struct yaffs_obj *obj;
+ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
+
+ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s",
+ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
+
+ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
+
+ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
+
+ /* Can't hold gross lock when calling yaffs_get_inode() */
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_lookup found %d", obj->obj_id);
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
+
+ }
+
+/* added NCB for 2.5/6 compatibility - forces add even if inode is
+ * NULL which creates dentry hash */
+ d_add(dentry, inode);
+
+ return NULL;
+}
+
+/*
+ * Create a link...
+ */
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *link = NULL;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+ link =
+ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ obj);
+
+ if (link) {
+ set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj));
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_link link count %d i_count %d",
+ old_dentry->d_inode->i_nlink,
+ atomic_read(&old_dentry->d_inode->i_count));
+ }
+
+ yaffs_gross_unlock(dev);
+
+ if (link) {
+ update_dir_time(dir);
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ uid_t uid = YCRED_FSUID();
+ gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
+
+ if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ return -ENAMETOOLONG;
+
+ if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) >
+ YAFFS_MAX_ALIAS_LENGTH)
+ return -ENAMETOOLONG;
+
+ dev = yaffs_inode_to_obj(dir)->my_dev;
+ yaffs_gross_lock(dev);
+ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
+ return 0;
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct yaffs_dev *dev;
+ int ret_val = YAFFS_FAIL;
+ struct yaffs_obj *target;
+
+ if (flags)
+ return -EINVAL;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
+ dev = yaffs_inode_to_obj(old_dir)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+
+ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
+ !list_empty(&target->variant.dir_variant.children)) {
+
+ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
+
+ ret_val = YAFFS_FAIL;
+ } else {
+ /* Now does unlinking internally using shadowing mechanism */
+ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
+
+ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
+ old_dentry->d_name.name,
+ yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+ }
+ yaffs_gross_unlock(dev);
+
+ if (ret_val == YAFFS_OK) {
+ if (target)
+ inode_dec_link_count(new_dentry->d_inode);
+
+ update_dir_time(old_dir);
+ if (old_dir != new_dir)
+ update_dir_time(new_dir);
+ return 0;
+ } else {
+ return -ENOTEMPTY;
+ }
+}
+
+
+
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int ret_val;
+
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s",
+ (int)(dir->i_ino), dentry->d_name.name);
+ obj = yaffs_inode_to_obj(dir);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ ret_val = yaffs_unlinker(obj, dentry->d_name.name);
+
+ if (ret_val == YAFFS_OK) {
+ inode_dec_link_count(dentry->d_inode);
+ atomic64_inc(&dir->i_version);
+ yaffs_gross_unlock(dev);
+ update_dir_time(dir);
+ return 0;
+ }
+ yaffs_gross_unlock(dev);
+ return -ENOTEMPTY;
+}
+
+
+
+static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+ .unlink = yaffs_unlink,
+ .symlink = yaffs_symlink,
+ .mkdir = yaffs_mkdir,
+ .rmdir = yaffs_unlink,
+ .mknod = yaffs_mknod,
+ .rename = yaffs_rename,
+ .setattr = yaffs_setattr,
+ .listxattr = yaffs_listxattr,
+};
+
+/*-----------------------------------------------------------------*/
+/* Directory search context allows us to unlock access to yaffs during
+ * filldir without causing problems with the directory being modified.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus search contexts) may be alive simultaneously so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A search context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
+
+struct yaffs_search_context {
+ struct yaffs_dev *dev;
+ struct yaffs_obj *dir_obj;
+ struct yaffs_obj *next_return;
+ struct list_head others;
+};
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+ struct yaffs_dev *dev = dir->my_dev;
+ struct yaffs_search_context *sc =
+ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+ if (sc) {
+ sc->dir_obj = dir;
+ sc->dev = dev;
+ if (list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else
+ sc->next_return =
+ list_entry(dir->variant.dir_variant.children.next,
+ struct yaffs_obj, siblings);
+ INIT_LIST_HEAD(&sc->others);
+ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+ }
+ return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+ if (sc) {
+ list_del(&sc->others);
+ kfree(sc);
+ }
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{
+ if (!sc)
+ return;
+
+ if (sc->next_return == NULL ||
+ list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else {
+ struct list_head *next = sc->next_return->siblings.next;
+
+ if (next == &sc->dir_obj->variant.dir_variant.children)
+ sc->next_return = NULL; /* end of list */
+ else
+ sc->next_return =
+ list_entry(next, struct yaffs_obj, siblings);
+ }
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+
+ struct list_head *i;
+ struct yaffs_search_context *sc;
+ struct list_head *search_contexts =
+ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+ /* Iterate through the directory search contexts.
+ * If any are currently on the object being removed, then advance
+ * the search context to the next object to prevent a hanging pointer.
+ */
+ list_for_each(i, search_contexts) {
+ sc = list_entry(i, struct yaffs_search_context, others);
+ if (sc->next_return == obj)
+ yaffs_search_advance(sc);
+ }
+
+}
+
+
+/*-----------------------------------------------------------------*/
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+static int yaffs_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+ u64 i_version;
+
+ obj = yaffs_dentry_to_obj(file->f_path.dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = ctx->pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (!dir_emit_dot(file, ctx)) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ ctx->pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)file->f_path.dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (!dir_emit_dotdot(file, ctx)) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ ctx->pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ i_version = atomic64_read(&inode->i_version);
+ if (file->f_version != i_version) {
+ offset = 2;
+ ctx->pos = offset;
+ file->f_version = i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (!dir_emit(ctx, name, strlen(name),
+ this_inode, this_type)) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ ctx->pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+#else
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = f->f_path.dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = f->f_pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)f->f_path.dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, "..", 2, offset,
+ f->f_path.dentry->d_parent->d_inode->i_ino,
+ DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ if (f->f_version != inode->i_version) {
+ offset = 2;
+ f->f_pos = offset;
+ f->f_version = inode->i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (filldir(dirent,
+ name,
+ strlen(name),
+ offset, this_inode, this_type) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ f->f_pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+#endif
+
+static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ .iterate = yaffs_readdir,
+#else
+ .readdir = yaffs_readdir,
+#endif
+ .fsync = yaffs_sync_object,
+ .llseek = generic_file_llseek,
+};
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj)
+{
+ if (inode && obj) {
+
+		/* Check mode against the variant type and attempt to
+		 * repair if broken.
+		 */
+ u32 mode = obj->yst_mode;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (!S_ISREG(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFREG;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (!S_ISLNK(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFLNK;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!S_ISDIR(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFDIR;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ default:
+ /* TODO? */
+ break;
+ }
+
+ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->obj_id;
+ inode->i_mode = obj->yst_mode;
+ i_uid_write(inode, obj->yst_uid);
+ i_gid_write(inode, obj->yst_gid);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ inode->i_blksize = inode->i_sb->s_blocksize;
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+ inode->i_mtime.tv_nsec = 0;
+ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+ inode->i_ctime.tv_nsec = 0;
+#else
+ inode->i_rdev = obj->yst_rdev;
+ inode->i_atime = obj->yst_atime;
+ inode->i_mtime = obj->yst_mtime;
+ inode->i_ctime = obj->yst_ctime;
+#endif
+ inode->i_size = yaffs_get_obj_length(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ set_nlink(inode, yaffs_get_obj_link_count(obj));
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
+ inode->i_mode, i_uid_read(inode), i_gid_read(inode),
+ inode->i_size, atomic_read(&inode->i_count));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ init_special_inode(inode, obj->yst_mode,
+ old_decode_dev(obj->yst_rdev));
+#else
+ init_special_inode(inode, obj->yst_mode,
+ (dev_t) (obj->yst_rdev));
+#endif
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+ break;
+ case S_IFLNK: /* symlink */
+ inode->i_op = &yaffs_symlink_inode_operations;
+ break;
+ }
+
+ yaffs_inode_to_obj_lv(inode) = obj;
+
+ obj->my_inode = inode;
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode invalid parameters");
+ }
+
+}
+
+
+
+/*
+ * yaffs background thread functions.
+ * yaffs_bg_thread_fn() is the thread function.
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after yaffs is initialised.
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is mounted read-only.
+ */
+
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{
+ unsigned erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned scattered = 0; /* Free chunks not in an erased block */
+
+ if (erased_chunks < dev->n_free_chunks)
+ scattered = (dev->n_free_chunks - erased_chunks);
+
+ if (!context->bg_running)
+ return 0;
+ else if (scattered < (dev->param.chunks_per_block * 2))
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 2)
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 4)
+ return 1;
+ else
+ return 2;
+}
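+/*
+ * Worked example (illustrative numbers): with the background thread
+ * running, chunks_per_block = 64, n_erased_blocks = 3 and
+ * n_free_chunks = 1000, erased_chunks = 192 and scattered = 808.
+ * scattered is not below 128 (two blocks' worth), and 192 exceeds
+ * neither 500 (half the free chunks) nor 250 (a quarter), so the
+ * urgency returned is 2.
+ */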
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+struct yaffs_timer {
+ struct timer_list timer;
+ struct task_struct *tsk;
+};
+
+void yaffs_background_waker(struct timer_list *t)
+{
+ struct yaffs_timer *yt = from_timer(yt, t, timer);
+
+ wake_up_process(yt->tsk);
+}
+
+static int yaffs_bg_thread_fn(void *data)
+{
+ struct yaffs_dev *dev = (struct yaffs_dev *)data;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned long now = jiffies;
+ unsigned long next_dir_update = now;
+ unsigned long next_gc = now;
+ unsigned long expires;
+ unsigned int urgency;
+
+ int gc_result;
+ struct yaffs_timer yt;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "yaffs_background starting for dev %p", (void *)dev);
+
+#ifdef YAFFS_COMPILE_FREEZER
+ set_freezable();
+#endif
+ while (context->bg_running) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+ if (kthread_should_stop())
+ break;
+
+#ifdef YAFFS_COMPILE_FREEZER
+ if (try_to_freeze())
+ continue;
+#endif
+ yaffs_gross_lock(dev);
+
+ now = jiffies;
+
+ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+ yaffs_update_dirty_dirs(dev);
+ next_dir_update = now + HZ;
+ }
+
+ if (time_after(now, next_gc) && yaffs_bg_enable) {
+ if (!dev->is_checkpointed) {
+ urgency = yaffs_bg_gc_urgency(dev);
+ gc_result = yaffs_bg_gc(dev, urgency);
+ if (urgency > 1)
+ next_gc = now + HZ / 20 + 1;
+ else if (urgency > 0)
+ next_gc = now + HZ / 10 + 1;
+ else
+ next_gc = now + HZ * 2;
+ } else {
+ /*
+ * gc not running so set to next_dir_update
+ * to cut down on wake ups
+ */
+ next_gc = next_dir_update;
+ }
+ }
+ yaffs_gross_unlock(dev);
+#if 1
+ expires = next_dir_update;
+ if (time_before(next_gc, expires))
+ expires = next_gc;
+ if (time_before(expires, now))
+ expires = now + HZ;
+
+ Y_INIT_TIMER(&yt.timer);
+ yt.timer.function = yaffs_background_waker;
+ yt.timer.expires = expires + 1;
+ yt.tsk = current;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_timer(&yt.timer);
+ schedule();
+ del_timer_sync(&yt.timer);
+#else
+ msleep(10);
+#endif
+ }
+
+ return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ int retval = 0;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+
+ if (dev->read_only)
+ return -1;
+
+ context->bg_running = 1;
+
+ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+ (void *)dev, "yaffs-bg-%d",
+ context->mount_id);
+
+ if (IS_ERR(context->bg_thread)) {
+ retval = PTR_ERR(context->bg_thread);
+ context->bg_thread = NULL;
+ context->bg_running = 0;
+ }
+ return retval;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+ ctxt->bg_running = 0;
+
+ if (ctxt->bg_thread) {
+ kthread_stop(ctxt->bg_thread);
+ ctxt->bg_thread = NULL;
+ }
+}
+#else
+static int yaffs_bg_thread_fn(void *data)
+{
+ return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ return 0;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+}
+#endif
+
+
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+ struct inode *iptr;
+ struct yaffs_obj *obj;
+
+ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+ obj = yaffs_inode_to_obj(iptr);
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "flushing obj %d",
+ obj->obj_id);
+ yaffs_flush_file(obj, 1, 0);
+ }
+ }
+}
+
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ if (!dev)
+ return;
+
+ yaffs_flush_inodes(sb);
+ yaffs_update_dirty_dirs(dev);
+ yaffs_flush_whole_cache(dev);
+ if (do_checkpoint)
+ yaffs_checkpoint_save(dev);
+}
+
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+static void yaffs_put_super(struct super_block *sb)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+ "yaffs_put_super");
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "Shutting down yaffs background thread");
+ yaffs_bg_stop(dev);
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "yaffs background thread shut down");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_super(sb, 1);
+
+ yaffs_deinitialise(dev);
+
+ yaffs_gross_unlock(dev);
+
+ mutex_lock(&yaffs_context_lock);
+ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+ mutex_unlock(&yaffs_context_lock);
+
+ if (yaffs_dev_to_lc(dev)->spare_buffer) {
+ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+ }
+
+ if (dev->os_context)
+ kfree(dev->os_context);
+
+ kfree(dev);
+
+ yaffs_put_mtd_device(mtd);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+ "yaffs_put_super done");
+}
+
+
+static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
+{
+ return yaffs_gc_control;
+}
+
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+ uint32_t generation)
+{
+ return Y_IGET(sb, ino);
+}
+
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct dentry *parent = ERR_PTR(-ENOENT);
+ struct inode *inode;
+ unsigned long parent_ino;
+ struct yaffs_obj *d_obj;
+ struct yaffs_obj *parent_obj;
+
+ d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+ if (d_obj) {
+ parent_obj = d_obj->parent;
+ if (parent_obj) {
+ parent_ino = yaffs_get_obj_inode(parent_obj);
+ inode = Y_IGET(sb, parent_ino);
+
+ if (IS_ERR(inode)) {
+ parent = ERR_CAST(inode);
+ } else {
+ parent = d_obtain_alias(inode);
+				if (!parent) {
+ parent = ERR_PTR(-ENOMEM);
+ iput(inode);
+ }
+ }
+ }
+ }
+
+ return parent;
+}
+
+/* Members that are left NULL in this structure imply
+ * using the default functions of exportfs.
+ */
+
+static struct export_operations yaffs_export_ops = {
+ .fh_to_dentry = yaffs2_fh_to_dentry,
+ .fh_to_parent = yaffs2_fh_to_parent,
+ .get_parent = yaffs2_get_parent,
+};
+
+#endif
+
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+ /* Clear the association between the inode and
+ * the struct yaffs_obj.
+ */
+ obj->my_inode = NULL;
+ yaffs_inode_to_obj_lv(inode) = NULL;
+
+ /* If the object freeing was deferred, then the real
+ * free happens now.
+ * This should fix the inode inconsistency problem.
+ */
+ yaffs_handle_defered_free(obj);
+}
+
+#ifdef YAFFS_HAS_EVICT_INODE
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ int deleteme = 0;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_evict_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (!inode->i_nlink && !is_bad_inode(inode))
+ deleteme = 1;
+ truncate_inode_pages(&inode->i_data, 0);
+ Y_CLEAR_INODE(inode);
+
+ if (deleteme && obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+}
+#else
+
+/* clear is called to tell the fs to release any per-inode data it holds.
+ * The object might still exist on disk and is just being thrown out of the cache
+ * or else the object has actually been deleted and we're being called via
+ * the chain
+ * yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode()
+ */
+
+static void yaffs_clear_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_clear_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+
+}
+
+/* delete is called when the link count is zero and the inode
+ * is put (ie. nobody wants to know about it anymore, time to
+ * delete the file).
+ * NB Must call clear_inode()
+ */
+static void yaffs_delete_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_delete_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+ truncate_inode_pages(&inode->i_data, 0);
+#endif
+ clear_inode(inode);
+}
+#endif
+
+
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+ struct super_block *sb = dentry->d_sb;
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#else
+static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#endif
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+ yaffs_gross_lock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+
+ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+ /* Do this if chunk size is not a power of 2 */
+
+ uint64_t bytes_in_dev;
+ uint64_t bytes_free;
+
+ bytes_in_dev =
+ ((uint64_t)
+ ((dev->param.end_block - dev->param.start_block +
+ 1))) * ((uint64_t) (dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk));
+
+ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
+ buf->f_blocks = bytes_in_dev;
+
+ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+ ((uint64_t) (dev->data_bytes_per_chunk));
+
+ do_div(bytes_free, sb->s_blocksize);
+
+ buf->f_bfree = bytes_free;
+
+ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ } else {
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+ }
+
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+
+ yaffs_gross_unlock(dev);
+ return 0;
+}
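+
+/*
+ * Worked example of the arithmetic above, with purely illustrative figures:
+ * for a hypothetical NAND of 2048-byte chunks, 64 chunks per block and
+ * blocks 0..1023, with sb->s_blocksize of 4096, the middle branch applies
+ * (4096 > 2048), so
+ *
+ *   f_blocks = 1024 * 64 / (4096 / 2048) = 32768
+ *
+ * i.e. 32768 reported blocks of 4096 bytes = 128 MiB, and f_bfree scales
+ * the free-chunk count down by the same factor of 2.
+ */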
+
+
+
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+ int do_checkpoint;
+ int dirty = yaffs_check_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+ gc_urgent,
+ dirty ? "dirty" : "clean",
+ request_checkpoint ? "checkpoint requested" : "no checkpoint",
+ oneshot_checkpoint ? " one-shot" : "");
+
+ yaffs_gross_lock(dev);
+ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+ oneshot_checkpoint) && !dev->is_checkpointed;
+
+ if (dirty || do_checkpoint) {
+ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+ yaffs_clear_super_dirty(dev);
+ if (oneshot_checkpoint)
+ yaffs_auto_checkpoint &= ~4;
+ }
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+
+#ifdef YAFFS_HAS_WRITE_SUPER
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static void yaffs_write_super(struct super_block *sb)
+#else
+static int yaffs_write_super(struct super_block *sb)
+#endif
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_write_super %s",
+ request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+ return 0;
+#endif
+}
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+#else
+static int yaffs_sync_fs(struct super_block *sb)
+#endif
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+ return 0;
+}
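+
+/*
+ * How the three routines above interpret yaffs_auto_checkpoint (presumably
+ * a module parameter defined earlier in this file): a value >= 1 makes
+ * yaffs_sync_fs() request a checkpoint, a value >= 2 makes
+ * yaffs_write_super() request one as well, and bit 2 (value 4) acts as a
+ * one-shot request that yaffs_do_sync_fs() clears once it has been honoured.
+ */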
+
+/* This function is only used to update dev->read_only when the file system
+ * is remounted.
+ */
+static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+ int read_only = 0;
+ struct mtd_info *mtd;
+ struct yaffs_dev *dev = NULL;
+
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device #%u doesn't appear to exist",
+ MINOR(sb->s_dev));
+ return 1;
+ }
+
+ /* Check it's NAND */
+ if (mtd->type != MTD_NANDFLASH) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device is not NAND, its type is %d",
+ mtd->type);
+ return 1;
+ }
+
+ read_only = ((*flags & MS_RDONLY) != 0);
+ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+ read_only = 1;
+ printk(KERN_INFO
+ "yaffs: mtd is read only, setting superblock read only\n");
+ *flags |= MS_RDONLY;
+ }
+
+ dev = sb->s_fs_info;
+ dev->read_only = read_only;
+
+ return 0;
+}
+
+static const struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
+
+#ifndef YAFFS_USE_OWN_IGET
+ .read_inode = yaffs_read_inode,
+#endif
+#ifdef YAFFS_HAS_PUT_INODE
+ .put_inode = yaffs_put_inode,
+#endif
+ .put_super = yaffs_put_super,
+#ifdef YAFFS_HAS_EVICT_INODE
+ .evict_inode = yaffs_evict_inode,
+#else
+ .delete_inode = yaffs_delete_inode,
+ .clear_inode = yaffs_clear_inode,
+#endif
+ .sync_fs = yaffs_sync_fs,
+#ifdef YAFFS_HAS_WRITE_SUPER
+ .write_super = yaffs_write_super,
+#endif
+ .remount_fs = yaffs_remount_fs,
+};
+
+struct yaffs_options {
+ int inband_tags;
+ int skip_checkpoint_read;
+ int skip_checkpoint_write;
+ int no_cache;
+ int tags_ecc_on;
+ int tags_ecc_overridden;
+ int lazy_loading_enabled;
+ int lazy_loading_overridden;
+ int empty_lost_and_found;
+ int empty_lost_and_found_overridden;
+ int disable_summary;
+};
+
+#define MAX_OPT_LEN 30
+static int yaffs_parse_options(struct yaffs_options *options,
+ const char *options_str)
+{
+ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+ /* Parse through the options, which are a comma-separated list */
+
+ while (options_str && *options_str && !error) {
+ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+ while (*options_str == ',')
+ options_str++;
+
+ while (*options_str && *options_str != ',') {
+ if (p < MAX_OPT_LEN) {
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+ if (!strcmp(cur_opt, "inband-tags")) {
+ options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
+ options->tags_ecc_on = 0;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
+ options->tags_ecc_on = 1;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
+ options->lazy_loading_enabled = 0;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
+ options->lazy_loading_enabled = 1;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "disable-summary")) {
+ options->disable_summary = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+ options->empty_lost_and_found = 0;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+ options->empty_lost_and_found = 1;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "no-cache")) {
+ options->no_cache = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+ options->skip_checkpoint_read = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+ options->skip_checkpoint_write = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+ cur_opt);
+ error = 1;
+ }
+ }
+
+ return error;
+}
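+
+/*
+ * Minimal usage sketch for the parser above, kept under #if 0 so it is never
+ * built; the option string, the expected results and the _example helper
+ * name are made up for illustration only.
+ */
+#if 0
+static int yaffs_parse_options_example(void)
+{
+ struct yaffs_options opts;
+
+ memset(&opts, 0, sizeof(opts));
+ if (yaffs_parse_options(&opts, "inband-tags,no-checkpoint"))
+ return -EINVAL; /* unknown token -> error */
+
+ /* Here opts.inband_tags, opts.skip_checkpoint_read and
+ * opts.skip_checkpoint_write are all 1; every other field is 0. */
+ return 0;
+}
+#endif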
+
+
+static struct dentry *yaffs_make_root(struct inode *inode)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+ struct dentry *root = d_alloc_root(inode);
+
+ if (!root)
+ iput(inode);
+
+ return root;
+#else
+ return d_make_root(inode);
+#endif
+}
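+
+/*
+ * Note on the two branches above: the d_make_root() path (3.4 onwards in the
+ * #if above) drops the inode itself when dentry allocation fails, which is
+ * why only the older d_alloc_root() path needs the explicit iput().
+ */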
+
+
+static int yaffs_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buff, size_t size)
+{
+ return yaffs_getxattr(dentry, inode, name, buff, size);
+}
+
+static int yaffs_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
+{
+ if (value)
+ return yaffs_setxattr(dentry, inode, name, value, size, flags);
+ else
+ return yaffs_removexattr(dentry, name);
+}
+
+static const struct xattr_handler yaffs_xattr_handler = {
+ .prefix = "", /* match anything */
+ .get = yaffs_xattr_get,
+ .set = yaffs_xattr_set,
+};
+
+static const struct xattr_handler *yaffs_xattr_handlers[] = {
+ &yaffs_xattr_handler,
+ NULL
+};
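+
+/*
+ * The empty prefix above makes this handler match every attribute namespace,
+ * and a NULL value in yaffs_xattr_set() means removal.  For illustration
+ * (hypothetical mount point and attribute name), from userspace this
+ * corresponds to something like:
+ *
+ *   setfattr -n user.origin -v camera /mnt/yaffs/photo.jpg
+ *   getfattr -n user.origin /mnt/yaffs/photo.jpg
+ *   setfattr -x user.origin /mnt/yaffs/photo.jpg   (removal -> NULL value)
+ */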
+
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+ struct super_block *sb,
+ void *data, int silent)
+{
+ int n_blocks;
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct yaffs_dev *dev = NULL;
+ char devname_buf[BDEVNAME_SIZE + 1];
+ struct mtd_info *mtd;
+ int err;
+ char *data_str = (char *)data;
+ struct yaffs_linux_context *context = NULL;
+ struct yaffs_param *param;
+
+ int read_only = 0;
+ int inband_tags = 0;
+
+ struct yaffs_options options;
+
+ unsigned mount_id;
+ int found;
+ struct yaffs_linux_context *context_iterator;
+ struct list_head *l;
+
+ if (!sb) {
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+ return NULL;
+ }
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
+ sb->s_xattr = yaffs_xattr_handlers;
+ sb->s_flags |= MS_NOATIME;
+
+ read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+ sb->s_export_op = &yaffs_export_ops;
+#endif
+
+ if (!sb->s_dev)
+ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+ else if (!yaffs_devname(sb, devname_buf))
+ printk(KERN_INFO "yaffs: devname is NULL\n");
+ else
+ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+ if (!data_str)
+ data_str = "";
+
+ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+ memset(&options, 0, sizeof(options));
+
+ if (yaffs_parse_options(&options, data_str)) {
+ /* Option parsing failed */
+ return NULL;
+ }
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: Using yaffs%d", yaffs_version);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Attempting MTD mount of %u.%u,\"%s\"",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ yaffs_devname(sb, devname_buf));
+
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: MTD device %u either not valid or unavailable",
+ MINOR(sb->s_dev));
+ return NULL;
+ }
+
+ if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+ yaffs_version = 2;
+ }
+
+ if (mtd->oobavail < sizeof(struct yaffs_packed_tags2) ||
+ options.inband_tags)
+ inband_tags = 1;
+
+ /* Added NCB 26/5/2006 for completeness */
+ if (yaffs_version == 2 && !inband_tags
+ && WRITE_SIZE(mtd) == 512) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+ yaffs_version = 1;
+ }
+
+ if(yaffs_verify_mtd(mtd, yaffs_version, inband_tags) < 0)
+ return NULL;
+
+ /* OK, so if we got here, we have an MTD that's NAND and looks
+ * like it has the right capabilities.
+ * Set the struct yaffs_dev up for this mtd.
+ */
+
+ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+ read_only = 1;
+ printk(KERN_INFO
+ "yaffs: mtd is read only, setting superblock read only\n"
+ );
+ sb->s_flags |= MS_RDONLY;
+ }
+
+ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+ if (!dev || !context) {
+ if (dev)
+ kfree(dev);
+ if (context)
+ kfree(context);
+ dev = NULL;
+ context = NULL;
+ }
+
+ if (!dev) {
+ /* Serious problem: could not allocate the device structure */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: Failed trying to allocate struct yaffs_dev."
+ );
+ return NULL;
+ }
+ memset(dev, 0, sizeof(struct yaffs_dev));
+ param = &(dev->param);
+
+ memset(context, 0, sizeof(struct yaffs_linux_context));
+ dev->os_context = context;
+ INIT_LIST_HEAD(&(context->context_list));
+ context->dev = dev;
+ context->super = sb;
+
+ dev->read_only = read_only;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ sb->s_fs_info = dev;
+#else
+ sb->u.generic_sbp = dev;
+#endif
+
+
+ dev->driver_context = mtd;
+ param->name = mtd->name;
+
+ /* Set up the memory size parameters.... */
+
+
+ param->n_reserved_blocks = 5;
+ param->n_caches = (options.no_cache) ? 0 : 10;
+ param->inband_tags = inband_tags;
+
+ param->enable_xattr = 1;
+ if (options.lazy_loading_overridden)
+ param->disable_lazy_load = !options.lazy_loading_enabled;
+
+ param->defered_dir_update = 1;
+
+ if (options.tags_ecc_overridden)
+ param->no_tags_ecc = !options.tags_ecc_on;
+
+ param->empty_lost_n_found = 1;
+ param->refresh_period = 500;
+ param->disable_summary = options.disable_summary;
+
+
+#ifdef CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING
+ param->disable_bad_block_marking = 1;
+#endif
+ if (options.empty_lost_and_found_overridden)
+ param->empty_lost_n_found = options.empty_lost_and_found;
+
+ /* ... and the functions. */
+ if (yaffs_version == 2) {
+ param->is_yaffs2 = 1;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ param->total_bytes_per_chunk = mtd->writesize;
+ param->chunks_per_block = mtd->erasesize / mtd->writesize;
+#else
+ param->total_bytes_per_chunk = mtd->oobblock;
+ param->chunks_per_block = mtd->erasesize / mtd->oobblock;
+#endif
+ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+ } else {
+ param->is_yaffs2 = 0;
+ n_blocks = YCALCBLOCKS(mtd->size,
+ YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
+
+ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+ }
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+
+ yaffs_mtd_drv_install(dev);
+
+ param->sb_dirty_fn = yaffs_set_super_dirty;
+ param->gc_control_fn = yaffs_gc_control_callback;
+
+ yaffs_dev_to_lc(dev)->super = sb;
+
+ param->use_nand_ecc = 1;
+
+ param->skip_checkpt_rd = options.skip_checkpoint_read;
+ param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+ mutex_lock(&yaffs_context_lock);
+ /* Get a mount id */
+ found = 0;
+ for (mount_id = 0; !found; mount_id++) {
+ found = 1;
+ list_for_each(l, &yaffs_context_list) {
+ context_iterator =
+ list_entry(l, struct yaffs_linux_context,
+ context_list);
+ if (context_iterator->mount_id == mount_id)
+ found = 0;
+ }
+ }
+ context->mount_id = mount_id;
+
+ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+ &yaffs_context_list);
+ mutex_unlock(&yaffs_context_lock);
+
+ /* Directory search handling... */
+ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+ param->remove_obj_fn = yaffs_remove_obj_callback;
+
+ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+ yaffs_gross_lock(dev);
+
+ err = yaffs_guts_initialise(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: guts initialised %s",
+ (err == YAFFS_OK) ? "OK" : "FAILED");
+
+ if (err == YAFFS_OK)
+ yaffs_bg_start(dev);
+
+ if (!context->bg_thread)
+ param->defered_dir_update = 0;
+
+ sb->s_maxbytes = yaffs_max_file_size(dev);
+
+ /* Release lock before yaffs_get_inode() */
+ yaffs_gross_unlock(dev);
+
+ /* Create root inode */
+ if (err == YAFFS_OK)
+ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+ if (!inode)
+ return NULL;
+
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
+
+ root = yaffs_make_root(inode);
+
+ if (!root)
+ return NULL;
+
+ sb->s_root = root;
+ if(!dev->is_checkpointed)
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
+ return sb;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs_mount,
+#else
+ .get_sb = yaffs_read_super,
+#endif
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs2_mount,
+#else
+ .get_sb = yaffs2_read_super,
+#endif
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs2_read_super(struct super_block *sb,
+ void *data, int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+static struct proc_dir_entry *my_proc_entry;
+
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+ struct yaffs_param *param = &dev->param;
+ int bs[10];
+
+ yaffs_count_blocks_by_state(dev,bs);
+
+ buf += sprintf(buf, "start_block.......... %d\n", param->start_block);
+ buf += sprintf(buf, "end_block............ %d\n", param->end_block);
+ buf += sprintf(buf, "total_bytes_per_chunk %d\n",
+ param->total_bytes_per_chunk);
+ buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc);
+ buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc);
+ buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2);
+ buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags);
+ buf += sprintf(buf, "empty_lost_n_found... %d\n",
+ param->empty_lost_n_found);
+ buf += sprintf(buf, "disable_lazy_load.... %d\n",
+ param->disable_lazy_load);
+ buf += sprintf(buf, "disable_bad_block_mrk %d\n",
+ param->disable_bad_block_marking);
+ buf += sprintf(buf, "refresh_period....... %d\n",
+ param->refresh_period);
+ buf += sprintf(buf, "n_caches............. %d\n", param->n_caches);
+ buf += sprintf(buf, "n_reserved_blocks.... %d\n",
+ param->n_reserved_blocks);
+ buf += sprintf(buf, "always_check_erased.. %d\n",
+ param->always_check_erased);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "block count by state\n");
+ buf += sprintf(buf, "0:%d 1:%d 2:%d 3:%d 4:%d\n",
+ bs[0], bs[1], bs[2], bs[3], bs[4]);
+ buf += sprintf(buf, "5:%d 6:%d 7:%d 8:%d 9:%d\n",
+ bs[5], bs[6], bs[7], bs[8], bs[9]);
+
+ return buf;
+}
+
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+ buf += sprintf(buf, "max file size....... %lld\n",
+ (long long) yaffs_max_file_size(dev));
+ buf += sprintf(buf, "data_bytes_per_chunk. %d\n",
+ dev->data_bytes_per_chunk);
+ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
+ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
+ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
+ buf += sprintf(buf, "blocks_in_checkpt.... %d\n",
+ dev->blocks_in_checkpt);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
+ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
+ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
+ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
+ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
+ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
+ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
+ buf += sprintf(buf, "passive_gc_count..... %u\n",
+ dev->passive_gc_count);
+ buf += sprintf(buf, "oldest_dirty_gc_count %u\n",
+ dev->oldest_dirty_gc_count);
+ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
+ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
+ buf += sprintf(buf, "n_retried_writes..... %u\n",
+ dev->n_retried_writes);
+ buf += sprintf(buf, "n_retired_blocks..... %u\n",
+ dev->n_retired_blocks);
+ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
+ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
+ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n",
+ dev->n_tags_ecc_fixed);
+ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n",
+ dev->n_tags_ecc_unfixed);
+ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
+ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
+ buf += sprintf(buf, "n_unlinked_files..... %u\n",
+ dev->n_unlinked_files);
+ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
+ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
+ buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used);
+ buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used);
+
+ return buf;
+}
+
+static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+{
+ struct list_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+
+ /* Get proc_file_read() to step 'offset' by one on each successive call.
+ * We use 'offset' (*ppos) to indicate where we are in dev_list.
+ * This also assumes the user has posted a read buffer large
+ * enough to hold the complete output; but that's life in /proc.
+ */
+
+ *(int *)start = 1;
+
+ /* Print header first */
+ if (step == 0)
+ buf +=
+ sprintf(buf, "Multi-version YAFFS\n");
+ else if (step == 1)
+ buf += sprintf(buf, "\n");
+ else {
+ step -= 2;
+
+ mutex_lock(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_context_list) {
+ struct yaffs_linux_context *dc =
+ list_entry(item, struct yaffs_linux_context,
+ context_list);
+ struct yaffs_dev *dev = dc->dev;
+
+ if (n < (step & ~1)) {
+ n += 2;
+ continue;
+ }
+ if ((step & 1) == 0) {
+ buf +=
+ sprintf(buf, "\nDevice %d \"%s\"\n", n,
+ dev->param.name);
+ buf = yaffs_dump_dev_part0(buf, dev);
+ } else {
+ buf = yaffs_dump_dev_part1(buf, dev);
+ }
+
+ break;
+ }
+ mutex_unlock(&yaffs_context_lock);
+ }
+
+ return buf - page < count ? buf - page : count;
+}
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
+
+static struct {
+ char *mask_name;
+ unsigned mask_bitfield;
+} mask_flags[] = {
+ {"allocate", YAFFS_TRACE_ALLOCATE},
+ {"always", YAFFS_TRACE_ALWAYS},
+ {"background", YAFFS_TRACE_BACKGROUND},
+ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+ {"buffers", YAFFS_TRACE_BUFFERS},
+ {"bug", YAFFS_TRACE_BUG},
+ {"checkpt", YAFFS_TRACE_CHECKPOINT},
+ {"deletion", YAFFS_TRACE_DELETION},
+ {"erase", YAFFS_TRACE_ERASE},
+ {"error", YAFFS_TRACE_ERROR},
+ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+ {"gc", YAFFS_TRACE_GC},
+ {"lock", YAFFS_TRACE_LOCK},
+ {"mtd", YAFFS_TRACE_MTD},
+ {"nandaccess", YAFFS_TRACE_NANDACCESS},
+ {"os", YAFFS_TRACE_OS},
+ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+ {"scan", YAFFS_TRACE_SCAN},
+ {"mount", YAFFS_TRACE_MOUNT},
+ {"tracing", YAFFS_TRACE_TRACING},
+ {"sync", YAFFS_TRACE_SYNC},
+ {"write", YAFFS_TRACE_WRITE},
+ {"verify", YAFFS_TRACE_VERIFY},
+ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+ {"all", 0xffffffff},
+ {"none", 0},
+ {NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ unsigned rg = 0, mask_bitfield;
+ char *end;
+ char *mask_name;
+ const char *x;
+ char substring[MAX_MASK_NAME_LENGTH + 1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+ int pos = 0;
+
+ rg = yaffs_trace_mask;
+
+ while (!done && (pos < count)) {
+ done = 1;
+ while ((pos < count) && isspace(buf[pos]))
+ pos++;
+
+ switch (buf[pos]) {
+ case '+':
+ case '-':
+ case '=':
+ add = buf[pos];
+ pos++;
+ break;
+
+ default:
+ add = ' ';
+ break;
+ }
+ mask_name = NULL;
+
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+ for (x = buf + pos, i = 0;
+ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ if (strcmp(substring, mask_flags[i].mask_name)
+ == 0) {
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield =
+ mask_flags[i].mask_bitfield;
+ done = 0;
+ break;
+ }
+ }
+ }
+
+ if (mask_name != NULL) {
+ done = 0;
+ switch (add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+ case '+':
+ rg |= mask_bitfield;
+ break;
+ case '=':
+ rg = mask_bitfield;
+ break;
+ default:
+ rg |= mask_bitfield;
+ break;
+ }
+ }
+ }
+
+ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) ==
+ mask_flags[i].mask_bitfield) ? '+' : '-';
+ printk(KERN_DEBUG "%c%s\n", flag,
+ mask_flags[i].mask_name);
+ }
+ }
+
+ return count;
+}
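+
+/*
+ * Illustrative use of the trace-mask parser above (mask names come from the
+ * table; the exact proc path assumes YPROC_ROOT is the top-level /proc
+ * directory).  Tokens are handled left to right: '+' ORs a mask in, '-'
+ * clears it, '=' replaces the whole mask, and a bare name or number ORs it
+ * in.  So, for example,
+ *
+ *   echo "-all +error +os" > /proc/yaffs
+ *
+ * clears everything and then enables the "error" and "os" bits
+ * (YAFFS_TRACE_ALWAYS is forced back on afterwards in any case).
+ */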
+
+/* Debug strings are of the form:
+ * .bnnn print info on block n
+ * .cobjn,chunkn print nand chunk id for objn:chunkn
+ */
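+
+/*
+ * For example (block and object numbers made up):
+ *
+ *   echo ".b100" > /proc/yaffs   - dump state of block 100
+ *   echo ".c27,3" > /proc/yaffs  - show the NAND chunk backing chunk 3 of
+ *                                  object 27 (chunk 0 would mean the object
+ *                                  header)
+ */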
+
+static int yaffs_proc_debug_write(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+
+ char str[100];
+ char *p0;
+ char *p1;
+ long p1_val;
+ long p0_val;
+ char cmd;
+ struct list_head *item;
+
+ memset(str, 0, sizeof(str));
+ memcpy(str, buf, min(count, sizeof(str) -1));
+
+ cmd = str[1];
+
+ p0 = str + 2;
+
+ p1 = p0;
+
+ while (*p1 && *p1 != ',') {
+ p1++;
+ }
+ *p1 = '\0';
+ p1++;
+
+ p0_val = simple_strtol(p0, NULL, 0);
+ p1_val = simple_strtol(p1, NULL, 0);
+
+
+ mutex_lock(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_context_list) {
+ struct yaffs_linux_context *dc =
+ list_entry(item, struct yaffs_linux_context,
+ context_list);
+ struct yaffs_dev *dev = dc->dev;
+
+ if (cmd == 'b') {
+ struct yaffs_block_info *bi;
+
+ bi = yaffs_get_block_info(dev,p0_val);
+
+ if(bi) {
+ printk("Block %d: state %d, retire %d, use %d, seq %d\n",
+ (int)p0_val, bi->block_state,
+ bi->needs_retiring, bi->pages_in_use,
+ bi->seq_number);
+ }
+ } else if (cmd == 'c') {
+ struct yaffs_obj *obj;
+ int nand_chunk;
+
+ obj = yaffs_find_by_number(dev, p0_val);
+ if (!obj)
+ printk("No obj %d\n", (int)p0_val);
+ else {
+ if(p1_val == 0)
+ nand_chunk = obj->hdr_chunk;
+ else
+ nand_chunk =
+ yaffs_find_chunk_in_file(obj,
+ p1_val, NULL);
+ printk("Nand chunk for %d:%d is %d\n",
+ (int)p0_val, (int)p1_val, nand_chunk);
+ }
+ }
+ }
+
+ mutex_unlock(&yaffs_context_lock);
+
+ return count;
+}
+
+static int yaffs_proc_write(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ if (buf[0] == '.')
+ return yaffs_proc_debug_write(file, buf, count, data);
+ return yaffs_proc_write_trace_options(file, buf, count, data);
+}
+#endif
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+ struct file_system_type *fst;
+ int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+ {&yaffs_fs_type, 0},
+ {&yaffs2_fs_type, 0},
+ {NULL, 0}
+};
+
+static int __init init_yaffs_fs(void)
+{
+ int error = 0;
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs Installing.");
+
+ mutex_init(&yaffs_context_lock);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+ /* Install the proc_fs entries */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG, YPROC_ROOT);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+ } else {
+ return -ENOMEM;
+ }
+#endif
+
+ /* Now add the file system entries */
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+ if (!error)
+ fsinst->installed = 1;
+ fsinst++;
+ }
+
+ /* Any errors? uninstall */
+ if (error) {
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+ }
+
+ return error;
+}
+
+static void __exit exit_yaffs_fs(void)
+{
+
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs removing.");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+ remove_proc_entry("yaffs", YPROC_ROOT);
+#endif
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+}
+
+module_init(init_yaffs_fs)
+module_exit(exit_yaffs_fs)
+
+MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
+MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
new file mode 100644
index 000000000000..d277e20e2a55
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int result;
+ int chunk;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int alloc_failed = 0;
+ struct yaffs_shadow_fixer *shadow_fixers = NULL;
+ u8 *chunk_data;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs1_scan starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ }
+ bi++;
+ }
+
+ /* For each block.... */
+ for (blk = dev->internal_start_block;
+ !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+ cond_resched();
+
+ bi = yaffs_get_block_info(dev, blk);
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ for (c = 0;
+ !alloc_failed && c < dev->param.chunks_per_block &&
+ state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
+ /* Read the tags and decide what to do */
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
+ tags.is_deleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+ deleted++;
+ dev->n_free_chunks++;
+ } else if (!tags.chunk_used) {
+ /* An unassigned chunk in the block
+ * This means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (c == 0) {
+ /* We're looking at the first chunk in
+ * the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ /* this is the block being allocated */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+
+ }
+
+ dev->n_free_chunks +=
+ (dev->param.chunks_per_block - c);
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ /* PutChunkIntoFile checks for a clash
+ * (two data chunks with the same chunk_id).
+ */
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in) {
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, 1))
+ alloc_failed = 1;
+ }
+
+ endpos =
+ (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk +
+ tags.n_bytes;
+ if (in &&
+ in->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE &&
+ in->variant.file_variant.scanned_size <
+ endpos) {
+ in->variant.file_variant.scanned_size =
+ endpos;
+ if (!dev->param.use_header_file_size) {
+ in->variant.
+ file_variant.file_size =
+ in->variant.
+ file_variant.scanned_size;
+ }
+
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Make the object
+ */
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in = yaffs_find_by_number(dev, tags.obj_id);
+ if (in && in->variant_type != oh->type) {
+ /* This should not happen, but somehow
+ * we've ended up with an obj_id that
+ * has been reused but not yet deleted,
+ * and worse still it has changed type.
+ * Delete the old object.
+ */
+
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ oh->type);
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadows_obj > 0) {
+
+ struct yaffs_shadow_fixer *fixer;
+ fixer =
+ kmalloc(sizeof
+ (struct yaffs_shadow_fixer),
+ GFP_NOFS);
+ if (fixer) {
+ fixer->next = shadow_fixers;
+ shadow_fixers = fixer;
+ fixer->obj_id = tags.obj_id;
+ fixer->shadowed_id =
+ oh->shadows_obj;
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Shadow fixer: %d shadows %d",
+ fixer->obj_id,
+ fixer->shadowed_id);
+
+ }
+
+ }
+
+ if (in && in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate and need to
+ * resolve it. */
+
+ unsigned existing_serial = in->serial;
+ unsigned new_serial =
+ tags.serial_number;
+
+ if (((existing_serial + 1) & 3) ==
+ new_serial) {
+ /* Use new one - destroy the
+ * existing one */
+ yaffs_chunk_del(dev,
+ in->hdr_chunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+ /* Use existing - destroy
+ * this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+ }
+ }
+
+ if (in && !in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (!parent)
+ alloc_failed = 1;
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.
+ children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, a problem....
+ * We're trying to use a
+ * non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* TODO: got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (dev->param.
+ use_header_file_size)
+ in->variant.
+ file_variant.file_size
+ = yaffs_oh_to_size(oh);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.
+ hardlink_variant.equiv_id =
+ oh->equiv_id;
+ list_add(&in->hard_links,
+ &hard_list);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.
+ alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.
+ symlink_variant.alias)
+ alloc_failed = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning,
+ * then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ /* If the block was partially allocated then
+ * treat it as fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ /* OK, we've done all the scanning.
+ * Fix up the hard link chains: all the objects have been scanned,
+ * so now it is time to add these hardlinks.
+ */
+
+ yaffs_link_fixup(dev, &hard_list);
+
+ /*
+ * Fix up any shadowed objects.
+ * There should not be more than one of these.
+ */
+ {
+ struct yaffs_shadow_fixer *fixer;
+ struct yaffs_obj *obj;
+
+ while (shadow_fixers) {
+ fixer = shadow_fixers;
+ shadow_fixers = fixer->next;
+ /* Complete the rename transaction by deleting the
+ * shadowed object then setting the object header
+ * to unshadowed.
+ */
+ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+ if (obj)
+ yaffs_del_obj(obj);
+
+ obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+ if (obj)
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ kfree(fixer);
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
new file mode 100644
index 000000000000..97e2fdd08a5d
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
new file mode 100644
index 000000000000..f1dc972276f7
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1532 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/*
+ * Checkpoints are of no real benefit on very small partitions.
+ *
+ * To save space on small partitions, don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ int i;
+ unsigned seq;
+ unsigned block_no = 0;
+ struct yaffs_block_info *b;
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ /* Find the oldest dirty sequence number. */
+ seq = dev->seq_number + 1;
+ b = dev->block_info;
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+ (b->pages_in_use - b->soft_del_pages) <
+ dev->param.chunks_per_block &&
+ b->seq_number < seq) {
+ seq = b->seq_number;
+ block_no = i;
+ }
+ b++;
+ }
+
+ if (block_no) {
+ dev->oldest_dirty_seq = seq;
+ dev->oldest_dirty_block = block_no;
+ }
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->oldest_dirty_seq)
+ yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs2_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad (i.e. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+ }
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (dev->oldest_dirty_seq) {
+ if (dev->oldest_dirty_seq > bi->seq_number) {
+ dev->oldest_dirty_seq = bi->seq_number;
+ dev->oldest_dirty_block = block_no;
+ }
+ }
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->has_shrink_hdr)
+ return 1; /* can gc */
+
+ yaffs2_find_oldest_dirty_seq(dev);
+
+ /* Can't do gc of this block if there are any blocks older than this
+ * one that have discarded pages.
+ */
+ return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
+{
+ u32 b;
+ u32 oldest = 0;
+ u32 oldest_seq = 0;
+ struct yaffs_block_info *bi;
+
+ if (!dev->param.is_yaffs2)
+ return oldest;
+
+ /*
+ * If refresh period < 10 then refreshing is disabled.
+ */
+ if (dev->param.refresh_period < 10)
+ return oldest;
+
+ /*
+ * Fix broken values.
+ */
+ if (dev->refresh_skip > dev->param.refresh_period)
+ dev->refresh_skip = dev->param.refresh_period;
+
+ if (dev->refresh_skip > 0)
+ return oldest;
+
+ /*
+ * Refresh skip is now zero.
+ * We'll do a refresh this time around....
+ * Update the refresh skip and find the oldest block.
+ */
+ dev->refresh_skip = dev->param.refresh_period;
+ dev->refresh_count++;
+ bi = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+ if (oldest < 1 || bi->seq_number < oldest_seq) {
+ oldest = b;
+ oldest_seq = bi->seq_number;
+ }
+ }
+ bi++;
+ }
+
+ if (oldest > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC refresh count %d selected block %d with seq_number %d",
+ dev->refresh_count, oldest, oldest_seq);
+ }
+
+ return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+ int nblocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ return !dev->param.skip_checkpt_wr &&
+ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+ int retval;
+ int n_bytes = 0;
+ int n_blocks;
+ int dev_blocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+ /* Not a valid value so recalculate */
+ dev_blocks = dev->param.end_block - dev->param.start_block + 1;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(struct yaffs_checkpt_dev);
+ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+ n_bytes += dev_blocks * dev->chunk_bit_stride;
+ n_bytes +=
+ (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
+ dev->n_obj;
+ n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(u32); /* checksum */
+
+ /* Round up (one extra block) and add 2 blocks to allow for
+ * some bad blocks, so add 3 in total */
+
+ n_blocks =
+ (n_bytes /
+ (dev->data_bytes_per_chunk *
+ dev->param.chunks_per_block)) + 3;
+
+ dev->checkpoint_blocks_required = n_blocks;
+ }
+
+ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+ if (retval < 0)
+ retval = 0;
+ return retval;
+}
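+
+/*
+ * Rough illustration of the sizing above (all figures hypothetical): if the
+ * block-info and chunk-bit bookkeeping comes to ~20 KiB and the object and
+ * tnode records to ~100 KiB, n_bytes is ~120 KiB.  With 2048-byte chunks and
+ * 64 chunks per block (128 KiB per block) the integer division yields 0, so
+ * the "+ 3" margin dominates and three blocks end up reserved for the
+ * checkpoint.
+ */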
+
+/*--------------------- Checkpointing --------------------*/
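+
+/*
+ * Checkpoint stream layout, as written by yaffs2_wr_checkpt_data() below:
+ * a validity marker (head), the device runtime state, the block info array,
+ * the chunk-use bitmaps, one record per live object (each file followed by
+ * its level-0 tnodes and a ~0 terminator), an all-0xff end-of-list object,
+ * a validity marker (tail) and finally a 32-bit checksum.  Reading follows
+ * the same order and the checkpoint is discarded if any stage fails.
+ */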
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.struct_type = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ if (ok)
+ ok = (cp.struct_type == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+ struct yaffs_dev *dev)
+{
+ cp->n_erased_blocks = dev->n_erased_blocks;
+ cp->alloc_block = dev->alloc_block;
+ cp->alloc_page = dev->alloc_page;
+ cp->n_free_chunks = dev->n_free_chunks;
+
+ cp->n_deleted_files = dev->n_deleted_files;
+ cp->n_unlinked_files = dev->n_unlinked_files;
+ cp->n_bg_deletions = dev->n_bg_deletions;
+ cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+ struct yaffs_checkpt_dev *cp)
+{
+ dev->n_erased_blocks = cp->n_erased_blocks;
+ dev->alloc_block = cp->alloc_block;
+ dev->alloc_page = cp->alloc_page;
+ dev->n_free_chunks = cp->n_free_chunks;
+
+ dev->n_deleted_files = cp->n_deleted_files;
+ dev->n_unlinked_files = cp->n_unlinked_files;
+ dev->n_bg_deletions = cp->n_bg_deletions;
+ dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ int ok;
+
+ /* Write device runtime values */
+ yaffs2_dev_to_checkpt_dev(&cp, dev);
+ cp.struct_type = sizeof(cp);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ /* Write block info */
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
+ if (!ok)
+ return 0;
+
+ /* Write chunk bits */
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ if (cp.struct_type != sizeof(cp))
+ return 0;
+
+ yaffs_checkpt_dev_to_dev(dev, &cp);
+
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+ if (!ok)
+ return 0;
+
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+
+ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+ struct yaffs_obj *obj)
+{
+ cp->obj_id = obj->obj_id;
+ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+ cp->hdr_chunk = obj->hdr_chunk;
+ cp->variant_type = obj->variant_type;
+ cp->deleted = obj->deleted;
+ cp->soft_del = obj->soft_del;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->rename_allowed = obj->rename_allowed;
+ cp->unlink_allowed = obj->unlink_allowed;
+ cp->serial = obj->serial;
+ cp->n_data_chunks = obj->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+ struct yaffs_checkpt_obj *cp)
+{
+ struct yaffs_obj *parent;
+
+ if (obj->variant_type != cp->variant_type) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+ cp->obj_id, cp->variant_type, cp->hdr_chunk,
+ obj->variant_type);
+ return 0;
+ }
+
+ obj->obj_id = cp->obj_id;
+
+ if (cp->parent_id)
+ parent = yaffs_find_or_create_by_number(obj->my_dev,
+ cp->parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ else
+ parent = NULL;
+
+ if (parent) {
+ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+ cp->obj_id, cp->parent_id,
+ cp->variant_type, cp->hdr_chunk,
+ parent->variant_type);
+ return 0;
+ }
+ yaffs_add_obj_to_dir(parent, obj);
+ }
+
+ obj->hdr_chunk = cp->hdr_chunk;
+ obj->variant_type = cp->variant_type;
+ obj->deleted = cp->deleted;
+ obj->soft_del = cp->soft_del;
+ obj->unlinked = cp->unlinked;
+ obj->fake = cp->fake;
+ obj->rename_allowed = cp->rename_allowed;
+ obj->unlink_allowed = cp->unlink_allowed;
+ obj->serial = cp->serial;
+ obj->n_data_chunks = cp->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+ if (obj->hdr_chunk > 0)
+ obj->lazy_loaded = 1;
+ return 1;
+}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+ struct yaffs_tnode *tn, u32 level,
+ int chunk_offset)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+ int ok = 1;
+ u32 base_offset;
+
+ if (!tn)
+ return 1;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (!tn->internal[i])
+ continue;
+ ok = yaffs2_checkpt_tnode_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ return ok;
+ }
+
+ /* Level 0 tnode */
+ base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+ ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
+ sizeof(base_offset));
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+
+ return ok;
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 end_marker = ~0;
+ int ok = 1;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return ok;
+
+ ok = yaffs2_checkpt_tnode_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.
+ top_level, 0);
+ if (ok)
+ ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
+ sizeof(end_marker)) == sizeof(end_marker));
+
+ return ok ? 1 : 0;
+}
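+
+/*
+ * So on the checkpoint stream each file's tnode tree is flattened into a
+ * sequence of (u32 base chunk offset, level-0 tnode) pairs terminated by a
+ * base offset of ~0; yaffs2_rd_checkpt_tnodes() below rebuilds the tree by
+ * feeding each pair back through yaffs_add_find_tnode_0().
+ */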
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 base_chunk;
+ int ok = 1;
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
+ struct yaffs_tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+ sizeof(base_chunk));
+
+ while (ok && (~base_chunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+ tn = yaffs_get_tnode(dev);
+ if (tn)
+ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+ else
+ ok = 0;
+
+ if (tn && ok)
+ ok = yaffs_add_find_tnode_0(dev,
+ file_struct_ptr,
+ base_chunk, tn) ? 1 : 0;
+
+ if (ok)
+ ok = (yaffs2_checkpt_rd
+ (dev, &base_chunk,
+ sizeof(base_chunk)) == sizeof(base_chunk));
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read tnodes %d records, last %d. ok %d",
+ nread, base_chunk, ok);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ if (!obj->defered_free) {
+ yaffs2_obj_checkpt_obj(&cp, obj);
+ cp.struct_type = sizeof(cp);
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+ cp.obj_id, cp.parent_id,
+ cp.variant_type, cp.hdr_chunk, obj);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp,
+ sizeof(cp)) == sizeof(cp));
+
+ if (ok &&
+ obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs2_wr_checkpt_tnodes(obj);
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
+ cp.struct_type = sizeof(cp);
+
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int ok = 1;
+ int done = 0;
+ LIST_HEAD(hard_list);
+
+
+ while (ok && !done) {
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (cp.struct_type != sizeof(cp)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "struct size %d instead of %d ok %d",
+ cp.struct_type, (int)sizeof(cp), ok);
+ ok = 0;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read object %d parent %d type %d chunk %d ",
+ cp.obj_id, cp.parent_id, cp.variant_type,
+ cp.hdr_chunk);
+
+ if (ok && cp.obj_id == ~0) {
+ done = 1;
+ } else if (ok) {
+ obj =
+ yaffs_find_or_create_by_number(dev, cp.obj_id,
+ cp.variant_type);
+ if (obj) {
+ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+ if (!ok)
+ break;
+ if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_rd_checkpt_tnodes(obj);
+ } else if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_HARDLINK) {
+ list_add(&obj->hard_links, &hard_list);
+ }
+ } else {
+ ok = 0;
+ }
+ }
+ }
+
+ if (ok)
+ yaffs_link_fixup(dev, &hard_list);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+ sizeof(checkpt_sum));
+
+ if (!ok)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum0;
+ u32 checkpt_sum1;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+ sizeof(checkpt_sum1));
+
+ if (!ok)
+ return 0;
+
+ if (checkpt_sum0 != checkpt_sum1)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!yaffs2_checkpt_required(dev)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint write");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 1);
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint device");
+ ok = yaffs2_wr_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint objects");
+ ok = yaffs2_wr_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok)
+ ok = yaffs2_wr_checkpt_sum(dev);
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return dev->is_checkpointed;
+}
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!dev->param.is_yaffs2)
+ ok = 0;
+
+ if (ok && dev->param.skip_checkpt_rd) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint read");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint device");
+ ok = yaffs2_rd_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint objects");
+ ok = yaffs2_rd_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok) {
+ ok = yaffs2_rd_checkpt_sum(dev);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint checksum %d", ok);
+ }
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return ok ? 1 : 0;
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+ dev->is_checkpointed = 0;
+ yaffs2_checkpt_invalidate_stream(dev);
+ }
+ if (dev->param.sb_dirty_fn)
+ dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs2_checkpt_save(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "save entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+
+ if (!dev->is_checkpointed) {
+ yaffs2_checkpt_invalidate(dev);
+ yaffs2_wr_checkpt_data(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "save exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ retval = yaffs2_rd_checkpt_data(dev);
+
+ if (dev->is_checkpointed) {
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+ /* Handle the case where new_size > old_file_size:
+ * we're going to be writing a hole.
+ * If the hole is small then write zeros, otherwise write
+ * a start-of-hole marker.
+ */
+ loff_t old_file_size;
+ loff_t increase;
+ int small_hole;
+ int result = YAFFS_OK;
+ struct yaffs_dev *dev = NULL;
+ u8 *local_buffer = NULL;
+ int small_increase_ok = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ dev = obj->my_dev;
+
+ /* Bail out if not yaffs2 mode */
+ if (!dev->param.is_yaffs2)
+ return YAFFS_OK;
+
+ old_file_size = obj->variant.file_variant.file_size;
+
+ if (new_size <= old_file_size)
+ return YAFFS_OK;
+
+ increase = new_size - old_file_size;
+
+ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+ small_hole = 1;
+ else
+ small_hole = 0;
+
+ if (small_hole)
+ local_buffer = yaffs_get_temp_buffer(dev);
+
+ if (local_buffer) {
+ /* fill hole with zero bytes */
+ loff_t pos = old_file_size;
+ int this_write;
+ int written;
+ memset(local_buffer, 0, dev->data_bytes_per_chunk);
+ small_increase_ok = 1;
+
+ while (increase > 0 && small_increase_ok) {
+ this_write = increase;
+ if (this_write > dev->data_bytes_per_chunk)
+ this_write = dev->data_bytes_per_chunk;
+ written =
+ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+ 0);
+ if (written == this_write) {
+ pos += this_write;
+ increase -= this_write;
+ } else {
+ small_increase_ok = 0;
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+
+ /* If out of space then reverse any chunks we've added */
+ if (!small_increase_ok)
+ yaffs_resize_file_down(obj, old_file_size);
+ }
+
+ if (!small_increase_ok &&
+ obj->parent &&
+ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+ /* Write a hole start header with the old file size */
+ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+ }
+
+ return result;
+}
+
+struct yaffs_block_index {
+ int seq;
+ int block;
+};
+
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+ int aseq = ((struct yaffs_block_index *)a)->seq;
+ int bseq = ((struct yaffs_block_index *)b)->seq;
+ int ablock = ((struct yaffs_block_index *)a)->block;
+ int bblock = ((struct yaffs_block_index *)b)->block;
+
+ if (aseq == bseq)
+ return ablock - bblock;
+
+ return aseq - bseq;
+}
+
+static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int blk, int chunk_in_block,
+ int *found_chunks,
+ u8 *chunk_data,
+ struct list_head *hard_list,
+ int summary_available)
+{
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int equiv_id;
+ loff_t file_size;
+ int is_shrink;
+ int is_unlinked;
+ struct yaffs_ext_tags tags;
+ int result;
+ int alloc_failed = 0;
+ int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
+ struct yaffs_file_var *file_var;
+ struct yaffs_hardlink_var *hl_var;
+ struct yaffs_symlink_var *sl_var;
+
+ if (summary_available) {
+ result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
+ tags.seq_number = bi->seq_number;
+ }
+
+ if (!summary_available || tags.obj_id == 0) {
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
+ dev->tags_used++;
+ } else {
+ dev->summary_used++;
+ }
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunk_used) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing
+ * the erased check. Just skip it so that it can
+ * be deleted.
+ * But, more typically, we get here when this is
+ * an unallocated chunk and this means that
+ * either the block is empty or this is the one
+ * being allocated from
+ */
+
+ if (*found_chunks) {
+ /* This is a chunk that was skipped due
+ * to failing the erased check */
+ } else if (chunk_in_block == 0) {
+ /* We're looking at the first chunk in
+ * the block so the block is unused */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ if (dev->seq_number == bi->seq_number) {
+ /* Allocating from this block*/
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, chunk_in_block);
+
+ bi->block_state =
+ YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = chunk_in_block;
+ dev->alloc_block_finder = blk;
+ } else {
+ /* This is a partially written block
+ * that is not the current
+ * allocation block.
+ */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Partially written block %d detected. gc will fix this.",
+ blk);
+ }
+ }
+ }
+
+ dev->n_free_chunks++;
+
+ } else if (tags.ecc_result ==
+ YAFFS_ECC_RESULT_UNFIXED) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Unfixed ECC in chunk(%d:%d), chunk ignored",
+ blk, chunk_in_block);
+ dev->n_free_chunks++;
+ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+ tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
+ (tags.chunk_id > 0 &&
+ tags.n_bytes > dev->data_bytes_per_chunk) ||
+ tags.seq_number != bi->seq_number) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+ blk, chunk_in_block, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+ dev->n_free_chunks++;
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ loff_t endpos;
+ loff_t chunk_base = (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk;
+
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ if (!in)
+ /* Out of memory */
+ alloc_failed = 1;
+
+ if (in &&
+ in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+ chunk_base < in->variant.file_variant.shrink_size) {
+ /* This has not been invalidated by
+ * a resize */
+ if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
+ chunk, -1))
+ alloc_failed = 1;
+
+ /* File size is calculated by looking at
+ * the data chunks if we have not
+ * seen an object header yet.
+ * Stop this practice once we find an
+ * object header.
+ */
+ endpos = chunk_base + tags.n_bytes;
+
+ if (!in->valid &&
+ in->variant.file_variant.scanned_size < endpos) {
+ in->variant.file_variant.
+ scanned_size = endpos;
+ in->variant.file_variant.
+ file_size = endpos;
+ }
+ } else if (in) {
+ /* This chunk has been invalidated by a
+ * resize or a past file deletion,
+ * so delete the chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make
+ * the object
+ */
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extra_available) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ tags.extra_obj_type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ if (!in ||
+ (!in->valid && dev->param.disable_lazy_load) ||
+ tags.extra_shadows ||
+ (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we
+ * need to read the chunk.
+ * TODO: In future we can probably defer
+ * reading the chunk and live with
+ * invalid data until needed.
+ */
+
+ result = yaffs_rd_chunk_tags_nand(dev,
+ chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ if (dev->param.inband_tags) {
+ /* Fix up the header if it got
+ * corrupted by inband tags */
+ oh->shadows_obj =
+ oh->inband_shadowed_obj_id;
+ oh->is_shrink =
+ oh->inband_is_shrink;
+ }
+
+ if (!in) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id, oh->type);
+ if (!in)
+ alloc_failed = 1;
+ }
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
+ tags.obj_id, chunk);
+ return YAFFS_FAIL;
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be
+ * discarded, but we first have to suck
+ * out resize info if it is a file.
+ */
+ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
+ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extra_available &&
+ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ )) {
+ loff_t this_size = (oh) ?
+ yaffs_oh_to_size(oh) :
+ tags.extra_file_size;
+ u32 parent_obj_id = (oh) ?
+ oh->parent_obj_id :
+ tags.extra_parent_id;
+
+ is_shrink = (oh) ?
+ oh->is_shrink :
+ tags.extra_is_shrink;
+
+ /* If it is deleted (unlinked
+ * at start also means deleted)
+ * we treat the file size as
+ * being zeroed at this point.
+ */
+ if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
+ parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
+ this_size = 0;
+ is_shrink = 1;
+ }
+
+ if (is_shrink &&
+ in->variant.file_variant.shrink_size >
+ this_size)
+ in->variant.file_variant.shrink_size =
+ this_size;
+
+ if (is_shrink)
+ bi->has_shrink_hdr = 1;
+ }
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+
+ if (!in->valid && in->variant_type !=
+ (oh ? oh->type : tags.extra_obj_type))
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+ oh ? oh->type : tags.extra_obj_type,
+ in->variant_type, tags.obj_id,
+ chunk);
+
+ if (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+
+ if (oh) {
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->lazy_loaded = 0;
+ } else {
+ in->lazy_loaded = 1;
+ }
+ in->hdr_chunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+ in->valid = 1;
+ in->hdr_chunk = chunk;
+ if (oh) {
+ in->variant_type = oh->type;
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+
+ if (oh->shadows_obj > 0)
+ yaffs_handle_shadowed_obj(dev,
+ oh->shadows_obj, 1);
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ parent = yaffs_find_or_create_by_number(dev,
+ oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = yaffs_oh_to_size(oh);
+ is_shrink = oh->is_shrink;
+ equiv_id = oh->equiv_id;
+ } else {
+ in->variant_type = tags.extra_obj_type;
+ parent = yaffs_find_or_create_by_number(dev,
+ tags.extra_parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = tags.extra_file_size;
+ is_shrink = tags.extra_is_shrink;
+ equiv_id = tags.extra_equiv_id;
+ in->lazy_loaded = 1;
+ }
+ in->dirty = 0;
+
+ if (!parent)
+ alloc_failed = 1;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent &&
+ parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * Trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+ yaffs_add_obj_to_dir(parent, in);
+
+ is_unlinked = (parent == dev->del_dir) ||
+ (parent == dev->unlinked_dir);
+
+ if (is_shrink)
+ /* Mark the block */
+ bi->has_shrink_hdr = 1;
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent
+ * object is scanned we put them all in a list.
+ * After scanning is complete, we should have all the
+ * objects, so we run through this list and fix up all
+ * the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ file_var = &in->variant.file_variant;
+ if (file_var->scanned_size < file_size) {
+ /* This covers the case where the file
+ * size is greater than the data held.
+ * This will happen if the file is
+ * resized to be larger than its
+ * current data extents.
+ */
+ file_var->file_size = file_size;
+ file_var->scanned_size = file_size;
+ }
+
+ if (file_var->shrink_size > file_size)
+ file_var->shrink_size = file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ hl_var = &in->variant.hardlink_variant;
+ if (!is_unlinked) {
+ hl_var->equiv_id = equiv_id;
+ list_add(&in->hard_links, hard_list);
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ sl_var = &in->variant.symlink_variant;
+ if (oh) {
+ sl_var->alias =
+ yaffs_clone_str(oh->alias);
+ if (!sl_var->alias)
+ alloc_failed = 1;
+ }
+ break;
+ }
+ }
+ }
+ return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
+}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+ int blk;
+ int block_iter;
+ int start_iter;
+ int end_iter;
+ int n_to_scan = 0;
+ enum yaffs_block_state state;
+ int c;
+ int deleted;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ u8 *chunk_data;
+ int found_chunks;
+ int alloc_failed = 0;
+ struct yaffs_block_index *block_index = NULL;
+ int alt_block_index = 0;
+ int summary_available;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ block_index =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
+
+ if (!block_index) {
+ block_index =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+ alt_block_index = 1;
+ }
+
+ if (!block_index) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards() could not allocate block index!"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, bi->block_state, seq_number);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocks_in_checkpt++;
+
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else if (bi->block_state ==
+ YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* Determine the highest sequence number */
+ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+ block_index[n_to_scan].seq = seq_number;
+ block_index[n_to_scan].block = blk;
+ n_to_scan++;
+ if (seq_number >= dev->seq_number)
+ dev->seq_number = seq_number;
+ } else {
+ /* TODO: Nasty sequence number! */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Block scanning block %d has bad sequence number %d",
+ blk, seq_number);
+ }
+ }
+ bi++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+ cond_resched();
+
+ /* Sort the blocks by sequence number */
+ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+ yaffs2_ybicmp, NULL);
+
+ cond_resched();
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+ /* Now scan the blocks looking at the data. */
+ start_iter = 0;
+ end_iter = n_to_scan - 1;
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+ /* For each block.... backwards */
+ for (block_iter = end_iter;
+ !alloc_failed && block_iter >= start_iter;
+ block_iter--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ cond_resched();
+
+ /* get the block to scan in the correct order */
+ blk = block_index[block_iter].block;
+ bi = yaffs_get_block_info(dev, blk);
+ deleted = 0;
+
+ summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
+
+ /* For each chunk in each block that needs scanning.... */
+ found_chunks = 0;
+ if (summary_available)
+ c = dev->chunks_per_summary - 1;
+ else
+ c = dev->param.chunks_per_block - 1;
+
+ for (/* c is already initialised */;
+ !alloc_failed && c >= 0 &&
+ (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
+ c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+ if (yaffs2_scan_chunk(dev, bi, blk, c,
+ &found_chunks, chunk_data,
+ &hard_list, summary_available) ==
+ YAFFS_FAIL)
+ alloc_failed = 1;
+ }
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning, then the block
+ * is fully allocated. */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+ }
+
+ yaffs_skip_rest_of_block(dev);
+
+ if (alt_block_index)
+ vfree(block_index);
+ else
+ kfree(block_index);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We have scanned all the objects, so now it's time to add
+ * these hardlinks.
+ */
+ yaffs_link_fixup(dev, &hard_list);
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+ return YAFFS_OK;
+}
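For orientation, here is a minimal sketch of how the checkpoint entry points implemented above are typically driven from the mount/unmount glue. It assumes the usual struct yaffs_dev setup; the exact call sites in the yaffs glue code are not shown in this patch hunk, so treat the wrapper names below as hypothetical.

	/* illustrative only -- entry-point names follow the declarations in yaffs_yaffs2.h */
	static void example_sync(struct yaffs_dev *dev, int do_checkpoint)
	{
		/* write a checkpoint on sync/unmount if one is not already valid */
		if (do_checkpoint && !dev->is_checkpointed)
			yaffs2_checkpt_save(dev);
	}

	static void example_mount(struct yaffs_dev *dev)
	{
		/* restore from the checkpoint; fall back to a full backwards scan */
		if (!yaffs2_checkpt_restore(dev))
			yaffs2_scan_backwards(dev);
	}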
diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
new file mode 100644
index 000000000000..2363bfd8bcab
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
new file mode 100644
index 000000000000..0e4e64e45f47
--- /dev/null
+++ b/fs/yaffs2/yportenv.h
@@ -0,0 +1,85 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_H__
+#define __YPORTENV_H__
+
+/*
+ * Define the MTD version in terms of Linux Kernel versions
+ * This allows yaffs to be used independently of the kernel
+ * as well as with it.
+ */
+
+#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+
+#ifdef YAFFS_OUT_OF_TREE
+#include "moduleconfig.h"
+#endif
+
+#include <linux/version.h>
+#define MTD_VERSION_CODE LINUX_VERSION_CODE
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+/* These type wrappings are used to support Unicode names in WinCE. */
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+
+#define YAFFS_ROOT_MODE 0755
+#define YAFFS_LOSTNFOUND_MODE 0700
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define Y_CURRENT_TIME ktime_get_real_seconds()
+#define Y_TIME_CONVERT(x) (x).tv_sec
+#else
+#define Y_CURRENT_TIME CURRENT_TIME
+#define Y_TIME_CONVERT(x) (x)
+#endif
+
+#define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+
+#define yaffs_printf(msk, fmt, ...) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__)
+
+#define yaffs_trace(msk, fmt, ...) do { \
+ if (yaffs_trace_mask & (msk)) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while (0)
+
+
+#endif
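As a quick illustration, the yaffs_trace() macro defined above takes a trace-mask bit plus a printf-style format and only prints when that bit is set in yaffs_trace_mask; the mask symbol and the YAFFS_TRACE_SCAN flag are assumed to come from yaffs_trace.h elsewhere in the patch.

	/* illustrative only */
	yaffs_trace(YAFFS_TRACE_SCAN, "scanning block %d seq %d", blk, seq_number);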
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 09ce2646c78a..efb9e7e2fc77 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1319,6 +1319,7 @@ extern void fasync_free(struct fasync_struct *);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
+extern int setfl(int fd, struct file * filp, unsigned long arg);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
@@ -1812,6 +1813,7 @@ struct file_operations {
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
+ int (*setfl)(struct file *, unsigned long);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
@@ -1882,6 +1884,12 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec *fast_pointer,
struct iovec **ret_pointer);
+typedef ssize_t (*vfs_readf_t)(struct file *, char __user *, size_t, loff_t *);
+typedef ssize_t (*vfs_writef_t)(struct file *, const char __user *, size_t,
+ loff_t *);
+vfs_readf_t vfs_readf(struct file *file);
+vfs_writef_t vfs_writef(struct file *file);
+
extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -2307,6 +2315,7 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern int generic_update_time(struct inode *, struct timespec64 *, int);
+extern int update_time(struct inode *, struct timespec64 *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2594,6 +2603,7 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb)
return false;
}
#endif
+extern int __sync_filesystem(struct super_block *, int);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c5335df2372f..4e48a5059536 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -306,6 +306,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
return lock->key == key;
}
+struct lock_class *lockdep_hlock_class(struct held_lock *hlock);
+
/*
* Acquire a lock.
*
@@ -432,6 +434,7 @@ struct lockdep_map { };
#define lockdep_depth(tsk) (0)
+#define lockdep_is_held(lock) (1)
#define lockdep_is_held_type(l, r) (1)
#define lockdep_assert_held(l) do { (void)(l); } while (0)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7000ddd807e0..3e7448ef2a44 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1531,6 +1531,28 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
+extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int);
+extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[],
+ int);
+extern void vma_do_get_file(struct vm_area_struct *, const char[], int);
+extern void vma_do_fput(struct vm_area_struct *, const char[], int);
+
+#define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \
+ __LINE__)
+#define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \
+ __LINE__)
+#define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__)
+#define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__)
+
+#ifndef CONFIG_MMU
+extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int);
+extern void vmr_do_fput(struct vm_region *, const char[], int);
+
+#define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \
+ __LINE__)
+#define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__)
+#endif /* !CONFIG_MMU */
+
extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2c471a2c43fa..c6e7aa6996ff 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -244,6 +244,7 @@ struct vm_region {
unsigned long vm_top; /* region allocated to here */
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
+ struct file *vm_prfile; /* the virtual backing file or NULL */
int vm_usage; /* region usage count (access under nommu_region_sem) */
bool vm_icache_flushed : 1; /* true if the icache has been flushed for
@@ -318,6 +319,7 @@ struct vm_area_struct {
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
units */
struct file * vm_file; /* File we map to (can be NULL). */
+ struct file *vm_prfile; /* shadow of vm_file */
void * vm_private_data; /* was vm_pte (shared mem) */
atomic_long_t swap_readahead_info;
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 35942084cd40..24f5fd1a789d 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -6,11 +6,14 @@
struct mnt_namespace;
struct fs_struct;
struct user_namespace;
+struct vfsmount;
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
+extern int is_current_mnt_ns(struct vfsmount *mnt);
+
extern const struct file_operations proc_mounts_operations;
extern const struct file_operations proc_mountinfo_operations;
extern const struct file_operations proc_mountstats_operations;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 6aa8cc83c3b6..f36a54c2b000 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -59,6 +59,7 @@ struct nfs_client {
u32 cl_minorversion;/* NFSv4 minorversion */
const char * cl_principal; /* used for machine cred */
+ int nfs_prog;
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
@@ -81,6 +82,7 @@ struct nfs_client {
const char * cl_owner_id;
u32 cl_cb_ident; /* v4.0 callback identifier */
+
const struct nfs4_minor_version_ops *cl_mvops;
unsigned long cl_mig_gen;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 441a93ebcac0..a017294bc64b 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -12,7 +12,7 @@
* reasonable for NFS over UDP.
*/
#define NFS_MAX_FILE_IO_SIZE (1048576U)
-#define NFS_DEF_FILE_IO_SIZE (4096U)
+#define NFS_DEF_FILE_IO_SIZE CONFIG_NFS_DEF_FILE_IO_SIZE
#define NFS_MIN_FILE_IO_SIZE (1024U)
struct nfs4_string {
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 74b4911ac16d..19789fbea567 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -87,4 +87,10 @@ extern void splice_shrink_spd(struct splice_pipe_desc *);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
extern const struct pipe_buf_operations default_pipe_buf_ops;
+
+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
+extern long do_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
#endif
diff --git a/include/uapi/linux/aufs_type.h b/include/uapi/linux/aufs_type.h
new file mode 100644
index 000000000000..05ea4d5c4cbe
--- /dev/null
+++ b/include/uapi/linux/aufs_type.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2019 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __AUFS_TYPE_H__
+#define __AUFS_TYPE_H__
+
+#define AUFS_NAME "aufs"
+
+#ifdef __KERNEL__
+/*
+ * define it before including all other headers.
+ * sched.h may use pr_* macros before defining "current", so define the
+ * no-current version first, and re-define later.
+ */
+#define pr_fmt(fmt) AUFS_NAME " %s:%d: " fmt, __func__, __LINE__
+#include <linux/sched.h>
+#undef pr_fmt
+#define pr_fmt(fmt) \
+ AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \
+ (int)sizeof(current->comm), current->comm, current->pid
+#else
+#include <stdint.h>
+#include <sys/types.h>
+#endif /* __KERNEL__ */
+
+#include <linux/limits.h>
+
+#define AUFS_VERSION "4.20.4+-20190211"
+
+/* todo? move this to linux-2.6.19/include/magic.h */
+#define AUFS_SUPER_MAGIC ('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_BRANCH_MAX_127
+typedef int8_t aufs_bindex_t;
+#define AUFS_BRANCH_MAX 127
+#else
+typedef int16_t aufs_bindex_t;
+#ifdef CONFIG_AUFS_BRANCH_MAX_511
+#define AUFS_BRANCH_MAX 511
+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
+#define AUFS_BRANCH_MAX 1023
+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
+#define AUFS_BRANCH_MAX 32767
+#endif
+#endif
+
+#ifdef __KERNEL__
+#ifndef AUFS_BRANCH_MAX
+#error unknown CONFIG_AUFS_BRANCH_MAX value
+#endif
+#endif /* __KERNEL__ */
+
+/* ---------------------------------------------------------------------- */
+
+#define AUFS_FSTYPE AUFS_NAME
+
+#define AUFS_ROOT_INO 2
+#define AUFS_FIRST_INO 11
+
+#define AUFS_WH_PFX ".wh."
+#define AUFS_WH_PFX_LEN ((int)sizeof(AUFS_WH_PFX) - 1)
+#define AUFS_WH_TMP_LEN 4
+/* a limit for rmdir/rename a dir and copyup */
+#define AUFS_MAX_NAMELEN (NAME_MAX \
+ - AUFS_WH_PFX_LEN * 2 /* doubly whiteouted */\
+ - 1 /* dot */\
+ - AUFS_WH_TMP_LEN) /* hex */
+#define AUFS_XINO_FNAME "." AUFS_NAME ".xino"
+#define AUFS_XINO_DEFPATH "/tmp/" AUFS_XINO_FNAME
+#define AUFS_XINO_DEF_SEC 30 /* seconds */
+#define AUFS_XINO_DEF_TRUNC 45 /* percentage */
+#define AUFS_DIRWH_DEF 3
+#define AUFS_RDCACHE_DEF 10 /* seconds */
+#define AUFS_RDCACHE_MAX 3600 /* seconds */
+#define AUFS_RDBLK_DEF 512 /* bytes */
+#define AUFS_RDHASH_DEF 32
+#define AUFS_WKQ_NAME AUFS_NAME "d"
+#define AUFS_MFS_DEF_SEC 30 /* seconds */
+#define AUFS_MFS_MAX_SEC 3600 /* seconds */
+#define AUFS_FHSM_CACHE_DEF_SEC 30 /* seconds */
+#define AUFS_PLINK_WARN 50 /* number of plinks in a single bucket */
+
+/* pseudo-link maintenance under /proc */
+#define AUFS_PLINK_MAINT_NAME "plink_maint"
+#define AUFS_PLINK_MAINT_DIR "fs/" AUFS_NAME
+#define AUFS_PLINK_MAINT_PATH AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME
+
+/* dirren, renamed dir */
+#define AUFS_DR_INFO_PFX AUFS_WH_PFX ".dr."
+#define AUFS_DR_BRHINO_NAME AUFS_WH_PFX "hino"
+/* whiteouted doubly */
+#define AUFS_WH_DR_INFO_PFX AUFS_WH_PFX AUFS_DR_INFO_PFX
+#define AUFS_WH_DR_BRHINO AUFS_WH_PFX AUFS_DR_BRHINO_NAME
+
+#define AUFS_DIROPQ_NAME AUFS_WH_PFX ".opq" /* whiteouted doubly */
+#define AUFS_WH_DIROPQ AUFS_WH_PFX AUFS_DIROPQ_NAME
+
+#define AUFS_BASE_NAME AUFS_WH_PFX AUFS_NAME
+#define AUFS_PLINKDIR_NAME AUFS_WH_PFX "plnk"
+#define AUFS_ORPHDIR_NAME AUFS_WH_PFX "orph"
+
+/* doubly whiteouted */
+#define AUFS_WH_BASE AUFS_WH_PFX AUFS_BASE_NAME
+#define AUFS_WH_PLINKDIR AUFS_WH_PFX AUFS_PLINKDIR_NAME
+#define AUFS_WH_ORPHDIR AUFS_WH_PFX AUFS_ORPHDIR_NAME
+
+/* branch permissions and attributes */
+#define AUFS_BRPERM_RW "rw"
+#define AUFS_BRPERM_RO "ro"
+#define AUFS_BRPERM_RR "rr"
+#define AUFS_BRATTR_COO_REG "coo_reg"
+#define AUFS_BRATTR_COO_ALL "coo_all"
+#define AUFS_BRATTR_FHSM "fhsm"
+#define AUFS_BRATTR_UNPIN "unpin"
+#define AUFS_BRATTR_ICEX "icex"
+#define AUFS_BRATTR_ICEX_SEC "icexsec"
+#define AUFS_BRATTR_ICEX_SYS "icexsys"
+#define AUFS_BRATTR_ICEX_TR "icextr"
+#define AUFS_BRATTR_ICEX_USR "icexusr"
+#define AUFS_BRATTR_ICEX_OTH "icexoth"
+#define AUFS_BRRATTR_WH "wh"
+#define AUFS_BRWATTR_NLWH "nolwh"
+#define AUFS_BRWATTR_MOO "moo"
+
+#define AuBrPerm_RW 1 /* writable, hardlinkable wh */
+#define AuBrPerm_RO (1 << 1) /* readonly */
+#define AuBrPerm_RR (1 << 2) /* natively readonly */
+#define AuBrPerm_Mask (AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR)
+
+#define AuBrAttr_COO_REG (1 << 3) /* copy-up on open */
+#define AuBrAttr_COO_ALL (1 << 4)
+#define AuBrAttr_COO_Mask (AuBrAttr_COO_REG | AuBrAttr_COO_ALL)
+
+#define AuBrAttr_FHSM (1 << 5) /* file-based hsm */
+#define AuBrAttr_UNPIN (1 << 6) /* rename-able top dir of
+ branch. meaningless since
+ linux-3.18-rc1 */
+
+/* ignore error in copying XATTR */
+#define AuBrAttr_ICEX_SEC (1 << 7)
+#define AuBrAttr_ICEX_SYS (1 << 8)
+#define AuBrAttr_ICEX_TR (1 << 9)
+#define AuBrAttr_ICEX_USR (1 << 10)
+#define AuBrAttr_ICEX_OTH (1 << 11)
+#define AuBrAttr_ICEX (AuBrAttr_ICEX_SEC \
+ | AuBrAttr_ICEX_SYS \
+ | AuBrAttr_ICEX_TR \
+ | AuBrAttr_ICEX_USR \
+ | AuBrAttr_ICEX_OTH)
+
+#define AuBrRAttr_WH (1 << 12) /* whiteout-able */
+#define AuBrRAttr_Mask AuBrRAttr_WH
+
+#define AuBrWAttr_NoLinkWH (1 << 13) /* un-hardlinkable whiteouts */
+#define AuBrWAttr_MOO (1 << 14) /* move-up on open */
+#define AuBrWAttr_Mask (AuBrWAttr_NoLinkWH | AuBrWAttr_MOO)
+
+#define AuBrAttr_CMOO_Mask (AuBrAttr_COO_Mask | AuBrWAttr_MOO)
+
+/* #warning test userspace */
+#ifdef __KERNEL__
+#ifndef CONFIG_AUFS_FHSM
+#undef AuBrAttr_FHSM
+#define AuBrAttr_FHSM 0
+#endif
+#ifndef CONFIG_AUFS_XATTR
+#undef AuBrAttr_ICEX
+#define AuBrAttr_ICEX 0
+#undef AuBrAttr_ICEX_SEC
+#define AuBrAttr_ICEX_SEC 0
+#undef AuBrAttr_ICEX_SYS
+#define AuBrAttr_ICEX_SYS 0
+#undef AuBrAttr_ICEX_TR
+#define AuBrAttr_ICEX_TR 0
+#undef AuBrAttr_ICEX_USR
+#define AuBrAttr_ICEX_USR 0
+#undef AuBrAttr_ICEX_OTH
+#define AuBrAttr_ICEX_OTH 0
+#endif
+#endif
+
+/* the longest combination */
+/* AUFS_BRATTR_ICEX and AUFS_BRATTR_ICEX_TR have no effect here */
+#define AuBrPermStrSz sizeof(AUFS_BRPERM_RW \
+ "+" AUFS_BRATTR_COO_REG \
+ "+" AUFS_BRATTR_FHSM \
+ "+" AUFS_BRATTR_UNPIN \
+ "+" AUFS_BRATTR_ICEX_SEC \
+ "+" AUFS_BRATTR_ICEX_SYS \
+ "+" AUFS_BRATTR_ICEX_USR \
+ "+" AUFS_BRATTR_ICEX_OTH \
+ "+" AUFS_BRWATTR_NLWH)
+
+typedef struct {
+ char a[AuBrPermStrSz];
+} au_br_perm_str_t;
+
+static inline int au_br_writable(int brperm)
+{
+ return brperm & AuBrPerm_RW;
+}
+
+static inline int au_br_whable(int brperm)
+{
+ return brperm & (AuBrPerm_RW | AuBrRAttr_WH);
+}
+
+static inline int au_br_wh_linkable(int brperm)
+{
+ return !(brperm & AuBrWAttr_NoLinkWH);
+}
+
+static inline int au_br_cmoo(int brperm)
+{
+ return brperm & AuBrAttr_CMOO_Mask;
+}
+
+static inline int au_br_fhsm(int brperm)
+{
+ return brperm & AuBrAttr_FHSM;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* ioctl */
+enum {
+ /* readdir in userspace */
+ AuCtl_RDU,
+ AuCtl_RDU_INO,
+
+ AuCtl_WBR_FD, /* pathconf wrapper */
+ AuCtl_IBUSY, /* busy inode */
+ AuCtl_MVDOWN, /* move-down */
+ AuCtl_BR, /* info about branches */
+ AuCtl_FHSM_FD /* connection for fhsm */
+};
+
+/* borrowed from linux/include/linux/kernel.h */
+#ifndef ALIGN
+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
+#endif
+
+/* borrowed from linux/include/linux/compiler-gcc3.h */
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+#ifdef __KERNEL__
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+#endif
+
+struct au_rdu_cookie {
+ uint64_t h_pos;
+ int16_t bindex;
+ uint8_t flags;
+ uint8_t pad;
+ uint32_t generation;
+} __aligned(8);
+
+struct au_rdu_ent {
+ uint64_t ino;
+ int16_t bindex;
+ uint8_t type;
+ uint8_t nlen;
+ uint8_t wh;
+ char name[0];
+} __aligned(8);
+
+static inline int au_rdu_len(int nlen)
+{
+ /* include the terminating NULL */
+ return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1,
+ sizeof(uint64_t));
+}
+
+union au_rdu_ent_ul {
+ struct au_rdu_ent __user *e;
+ uint64_t ul;
+};
+
+enum {
+ AufsCtlRduV_SZ,
+ AufsCtlRduV_End
+};
+
+struct aufs_rdu {
+ /* input */
+ union {
+ uint64_t sz; /* AuCtl_RDU */
+ uint64_t nent; /* AuCtl_RDU_INO */
+ };
+ union au_rdu_ent_ul ent;
+ uint16_t verify[AufsCtlRduV_End];
+
+ /* input/output */
+ uint32_t blk;
+
+ /* output */
+ union au_rdu_ent_ul tail;
+ /* number of entries which were added in a single call */
+ uint64_t rent;
+ uint8_t full;
+ uint8_t shwh;
+
+ struct au_rdu_cookie cookie;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+/* dirren. the branch is identified by the name of the file that contains this */
+struct au_drinfo {
+ uint64_t ino;
+ union {
+ uint8_t oldnamelen;
+ uint64_t _padding;
+ };
+ uint8_t oldname[0];
+} __aligned(8);
+
+struct au_drinfo_fdata {
+ uint32_t magic;
+ struct au_drinfo drinfo;
+} __aligned(8);
+
+#define AUFS_DRINFO_MAGIC_V1 ('a' << 24 | 'd' << 16 | 'r' << 8 | 0x01)
+/* future */
+#define AUFS_DRINFO_MAGIC_V2 ('a' << 24 | 'd' << 16 | 'r' << 8 | 0x02)
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_wbr_fd {
+ uint32_t oflags;
+ int16_t brid;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_ibusy {
+ uint64_t ino, h_ino;
+ int16_t bindex;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+/* error code for move-down */
+/* the actual message strings are implemented in aufs-util.git */
+enum {
+ EAU_MVDOWN_OPAQUE = 1,
+ EAU_MVDOWN_WHITEOUT,
+ EAU_MVDOWN_UPPER,
+ EAU_MVDOWN_BOTTOM,
+ EAU_MVDOWN_NOUPPER,
+ EAU_MVDOWN_NOLOWERBR,
+ EAU_Last
+};
+
+/* flags for move-down */
+#define AUFS_MVDOWN_DMSG 1
+#define AUFS_MVDOWN_OWLOWER (1 << 1) /* overwrite lower */
+#define AUFS_MVDOWN_KUPPER (1 << 2) /* keep upper */
+#define AUFS_MVDOWN_ROLOWER (1 << 3) /* do even if lower is RO */
+#define AUFS_MVDOWN_ROLOWER_R (1 << 4) /* did on lower RO */
+#define AUFS_MVDOWN_ROUPPER (1 << 5) /* do even if upper is RO */
+#define AUFS_MVDOWN_ROUPPER_R (1 << 6) /* did on upper RO */
+#define AUFS_MVDOWN_BRID_UPPER (1 << 7) /* upper brid */
+#define AUFS_MVDOWN_BRID_LOWER (1 << 8) /* lower brid */
+#define AUFS_MVDOWN_FHSM_LOWER (1 << 9) /* find fhsm attr for lower */
+#define AUFS_MVDOWN_STFS (1 << 10) /* req. stfs */
+#define AUFS_MVDOWN_STFS_FAILED (1 << 11) /* output: stfs is unusable */
+#define AUFS_MVDOWN_BOTTOM (1 << 12) /* output: no more lowers */
+
+/* index for move-down */
+enum {
+ AUFS_MVDOWN_UPPER,
+ AUFS_MVDOWN_LOWER,
+ AUFS_MVDOWN_NARRAY
+};
+
+/*
+ * additional info of move-down
+ * number of free blocks and inodes.
+ * subset of struct kstatfs, but smaller and always 64bit.
+ */
+struct aufs_stfs {
+ uint64_t f_blocks;
+ uint64_t f_bavail;
+ uint64_t f_files;
+ uint64_t f_ffree;
+};
+
+struct aufs_stbr {
+ int16_t brid; /* optional input */
+ int16_t bindex; /* output */
+ struct aufs_stfs stfs; /* output when AUFS_MVDOWN_STFS set */
+} __aligned(8);
+
+struct aufs_mvdown {
+ uint32_t flags; /* input/output */
+ struct aufs_stbr stbr[AUFS_MVDOWN_NARRAY]; /* input/output */
+ int8_t au_errno; /* output */
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+union aufs_brinfo {
+ /* PATH_MAX may differ between kernel-space and user-space */
+ char _spacer[4096];
+ struct {
+ int16_t id;
+ int perm;
+ char path[0];
+ };
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+#define AuCtlType 'A'
+#define AUFS_CTL_RDU _IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu)
+#define AUFS_CTL_RDU_INO _IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu)
+#define AUFS_CTL_WBR_FD _IOW(AuCtlType, AuCtl_WBR_FD, \
+ struct aufs_wbr_fd)
+#define AUFS_CTL_IBUSY _IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy)
+#define AUFS_CTL_MVDOWN _IOWR(AuCtlType, AuCtl_MVDOWN, \
+ struct aufs_mvdown)
+#define AUFS_CTL_BRINFO _IOW(AuCtlType, AuCtl_BR, union aufs_brinfo)
+#define AUFS_CTL_FHSM_FD _IOW(AuCtlType, AuCtl_FHSM_FD, int)
+
+#endif /* __AUFS_TYPE_H__ */
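For context, a hedged userspace sketch of how one of the ioctls defined above is issued from a program; the mount point path is hypothetical, error handling is minimal, and the real tooling lives in aufs-util.

	/* illustrative only */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/aufs_type.h>

	int main(void)
	{
		struct aufs_wbr_fd wbrfd = { .oflags = O_RDWR };
		int fd = open("/mnt/aufs", O_RDONLY);	/* hypothetical aufs mount */
		int hfd;

		if (fd < 0)
			return 1;
		/* AUFS_CTL_WBR_FD hands back an fd on the writable branch */
		hfd = ioctl(fd, AUFS_CTL_WBR_FD, &wbrfd);
		if (hfd >= 0)
			close(hfd);
		close(fd);
		return 0;
	}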
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index a5773899f4d9..22794eb8d1a3 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -48,6 +48,7 @@
#define CASE_LOWER_BASE 8 /* base is lower case */
#define CASE_LOWER_EXT 16 /* extension is lower case */
+#define FAT_NO_83NAME 32 /* no 8.3 short filename for this file */
#define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */
#define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG)
diff --git a/include/uapi/linux/nfs_mount.h b/include/uapi/linux/nfs_mount.h
index e44e00616ab5..af94a8b7ad5c 100644
--- a/include/uapi/linux/nfs_mount.h
+++ b/include/uapi/linux/nfs_mount.h
@@ -21,7 +21,7 @@
* mount-to-kernel version compatibility. Some of these aren't used yet
* but here they are anyway.
*/
-#define NFS_MOUNT_VERSION 6
+#define NFS_MOUNT_VERSION 7
#define NFS_MAX_CONTEXT_LEN 256
struct nfs_mount_data {
@@ -44,6 +44,8 @@ struct nfs_mount_data {
struct nfs3_fh root; /* 4 */
int pseudoflavor; /* 5 */
char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */
+ int nfs_prog; /* 7 */
+ int mount_prog; /* 7 */
};
/* bits in the flags field visible to user space */
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
index 8d2a3bfc8dac..f6cbd0d3c8ed 100644
--- a/include/video/uvesafb.h
+++ b/include/video/uvesafb.h
@@ -89,7 +89,9 @@ struct vbe_mode_ib {
#define UVESAFB_DEFAULT_MODE "640x480-16"
-/* How long to wait for a reply from userspace [ms] */
+/* How long to wait for a reply from userspace [ms]
+ * This is the default value of module param task_timeout
+ */
#define UVESAFB_TIMEOUT 5000
/* Max number of concurrent tasks */
diff --git a/init/Kconfig b/init/Kconfig
index c9386a365eea..bdf4f284509b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1196,6 +1196,31 @@ menuconfig EXPERT
environments which can tolerate a "non-standard" kernel.
Only use this if you really know what you are doing.
+config UPTIME_LIMITED_KERNEL
+ bool "Create a kernel with uptime limitations"
+ default n
+ help
+ Limit the amount of time a kernel can run. The associated UPTIME_LIMIT*
+ kernel config options should be used to tune the behaviour.
+
+config UPTIME_LIMIT_DURATION
+ int "Kernel uptime limit in minutes"
+ depends on UPTIME_LIMITED_KERNEL
+ range 0 14400
+ default 0
+ help
+ Define the uptime limitation on a kernel in minutes. Once
+ the defined time expires the kernel will emit a warning, cease
+ to be usable and eventually restart. The valid range is 0 (disable)
+ to 14400 (10 days)
+
+config UPTIME_LIMIT_KERNEL_REBOOT
+ bool "Reboot a time limited kernel at expiration"
+ depends on UPTIME_LIMITED_KERNEL
+ default y
+ help
+ Reboot an uptime limited kernel at expiration.
+
config UID16
bool "Enable 16-bit UID system calls" if EXPERT
depends on HAVE_UID16 && MULTIUSER
diff --git a/init/do_mounts.c b/init/do_mounts.c
index f8c230c77035..91ca8d279316 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -439,7 +439,9 @@ retry:
printk("DEBUG_BLOCK_EXT_DEVT is enabled, you need to specify "
"explicit textual name for \"root=\" boot option.\n");
#endif
- panic("VFS: Unable to mount root fs on %s", b);
+ printk(KERN_EMERG "VFS: Unable to mount root fs on %s\n", b);
+ printk(KERN_EMERG "User configuration error - no valid root filesystem found\n");
+ panic("Invalid configuration from end user prevents continuing");
}
if (!(flags & SB_RDONLY)) {
flags |= SB_RDONLY;
@@ -455,7 +457,9 @@ retry:
#ifdef CONFIG_BLOCK
__bdevname(ROOT_DEV, b);
#endif
- panic("VFS: Unable to mount root fs on %s", b);
+ printk(KERN_EMERG "VFS: Unable to mount root fs on %s\n", b);
+ printk(KERN_EMERG "User configuration error - no valid root filesystem found\n");
+ panic("Invalid configuration from end user prevents continuing");
out:
put_page(page);
}
diff --git a/init/main.c b/init/main.c
index 7ae824545265..cc5e538e1ba1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1102,6 +1102,9 @@ static int __ref kernel_init(void *unused)
static noinline void __init kernel_init_freeable(void)
{
+#ifndef CONFIG_BLK_DEV_INITRD
+ struct kstat console_stat;
+#endif
/*
* Wait until kthreadd is all set-up.
*/
@@ -1135,6 +1138,18 @@ static noinline void __init kernel_init_freeable(void)
do_basic_setup();
+#ifndef CONFIG_BLK_DEV_INITRD
+ /*
+ * Use /dev/console to infer if the rootfs is setup properly.
+ * In case of initrd or initramfs /dev/console might be instantiated
+ * later by /init so don't do this check for CONFIG_BLK_DEV_INITRD
+ */
+ if (vfs_lstat((char __user *) "/dev/console", (struct kstat __user *) &console_stat)
+ || !S_ISCHR(console_stat.mode)) {
+ panic("/dev/console is missing or not a character device!\nPlease ensure your rootfs is properly configured\n");
+ }
+#endif
+
/* Open the /dev/console on the rootfs, this should never fail */
if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
pr_err("Warning: unable to open an initial console.\n");
diff --git a/kernel/Makefile b/kernel/Makefile
index 6aa7543bcdb2..1ea0ba13a445 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
+obj-$(CONFIG_UPTIME_LIMITED_KERNEL) += uptime_limit.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/kernel/fork.c b/kernel/fork.c
index 95fd41e92031..17389e9935c4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -547,7 +547,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
struct inode *inode = file_inode(file);
struct address_space *mapping = file->f_mapping;
- get_file(file);
+ vma_get_file(tmp);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
i_mmap_lock_write(mapping);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e805fe3bf87f..10a2cf88add3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -143,7 +143,7 @@ static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
-static inline struct lock_class *hlock_class(struct held_lock *hlock)
+inline struct lock_class *lockdep_hlock_class(struct held_lock *hlock)
{
if (!hlock->class_idx) {
/*
@@ -154,6 +154,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
}
return lock_classes + hlock->class_idx - 1;
}
+EXPORT_SYMBOL_GPL(lockdep_hlock_class);
+#define hlock_class(hlock) lockdep_hlock_class(hlock)
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 0fef395662a6..83fb1ecfc33d 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -116,3 +116,4 @@ void task_work_run(void)
} while (work);
}
}
+EXPORT_SYMBOL_GPL(task_work_run);
diff --git a/kernel/uptime_limit.c b/kernel/uptime_limit.c
new file mode 100644
index 000000000000..b6a1a5e4f9d9
--- /dev/null
+++ b/kernel/uptime_limit.c
@@ -0,0 +1,166 @@
+/*
+ * uptime_limit.c
+ *
+ * This file contains the functions which can limit kernel uptime
+ *
+ * Copyright (C) 2011 Bruce Ashfield (bruce.ashfield@windriver.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * This functionality is somewhat close to the softdog watchdog
+ * implementation, but it cannot be used directly for several reasons:
+ *
+ * - The soft watchdog should be available while this functionality is active
+ * - The duration range is different between this and the softdog. The
+ * timeout available here is potentially quite a bit longer.
+ * - At expiration, there are different expiration requirements and actions.
+ * - This functionality is specific to a particular use case and should
+ * not impact mainline functionality
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#define UPTIME_LIMIT_IN_SECONDS (CONFIG_UPTIME_LIMIT_DURATION * 60)
+#define MIN(X, Y) ((X) <= (Y) ? (X) : (Y))
+#define TEN_MINUTES_IN_SECONDS 600
+
+enum uptime_expiration_type {
+ uptime_no_action,
+ uptime_reboot
+};
+
+static enum uptime_expiration_type uptime_expiration_action = uptime_no_action;
+static struct timer_list timelimit_timer;
+static struct task_struct *uptime_worker_task;
+
+static void timelimit_expire(unsigned long timeout_seconds)
+{
+ char msg[128];
+ int msglen = 127;
+
+ if (timeout_seconds) {
+ if (timeout_seconds >= 60)
+ snprintf(msg, msglen,
+ "Uptime: kernel validity duration has %d %s remaining\n",
+ (int) timeout_seconds / 60, "minute(s)");
+ else
+ snprintf(msg, msglen,
+ "Uptime: kernel validity duration has %d %s remaining\n",
+ (int) timeout_seconds, "seconds");
+
+ printk(KERN_CRIT "%s", msg);
+
+ timelimit_timer.expires = jiffies + timeout_seconds * HZ;
+ timelimit_timer.data = 0;
+ add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
+ } else {
+ printk(KERN_CRIT "Uptime: Kernel validity timeout has expired\n");
+#ifdef CONFIG_UPTIME_LIMIT_KERNEL_REBOOT
+ uptime_expiration_action = uptime_reboot;
+ wake_up_process(uptime_worker_task);
+#endif
+ }
+}
+
+/*
+ * This thread starts and then immediately goes to sleep. When it is woken
+ * up, it carries out the instructions left in uptime_expiration_action. If
+ * no action was specified it simply goes back to sleep.
+ */
+static int uptime_worker(void *unused)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!kthread_should_stop()) {
+ schedule();
+
+ if (kthread_should_stop())
+ break;
+
+ if (uptime_expiration_action == uptime_reboot) {
+ printk(KERN_CRIT "Uptime: restarting machine\n");
+ kernel_restart(NULL);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static int timeout_enable(int cpu)
+{
+ int err = 0;
+ int warning_limit;
+
+ /*
+ * Create an uptime worker thread. This thread is required since the
+ * safe version of kernel restart cannot be called from a
+ * non-interruptible context. Which means we cannot call it directly
+ * from a timer callback. So we arrange for the timer expiration to
+ * wakeup a thread, which performs the action.
+ */
+ uptime_worker_task = kthread_create(uptime_worker,
+ (void *)(unsigned long)cpu,
+ "uptime_worker/%d", cpu);
+ if (IS_ERR(uptime_worker_task)) {
+ printk(KERN_ERR "Uptime: task for cpu %i failed\n", cpu);
+ err = PTR_ERR(uptime_worker_task);
+ goto out;
+ }
+ /* bind to cpu0 to avoid migration and hot plug nastiness */
+ kthread_bind(uptime_worker_task, cpu);
+ wake_up_process(uptime_worker_task);
+
+ /* Create the timer that will wake the uptime thread at expiration */
+ init_timer(&timelimit_timer);
+ timelimit_timer.function = timelimit_expire;
+ /*
+ * Fire two timers. One warning timeout and the final timer
+ * which will carry out the expiration action. The warning timer will
+ * expire at the minimum of half the original time or ten minutes.
+ */
+ warning_limit = MIN(UPTIME_LIMIT_IN_SECONDS/2, TEN_MINUTES_IN_SECONDS);
+ timelimit_timer.expires = jiffies + warning_limit * HZ;
+ timelimit_timer.data = UPTIME_LIMIT_IN_SECONDS - warning_limit;
+
+ add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
+out:
+ return err;
+}
+
+static int __init timelimit_init(void)
+{
+ int err = 0;
+
+ printk(KERN_INFO "Uptime: system uptime restrictions enabled\n");
+
+ /*
+ * Enable the timeout thread for cpu 0 only, assuming that the
+ * uptime limit is non-zero, to protect against any cpu
+ * migration issues.
+ */
+ if (UPTIME_LIMIT_IN_SECONDS)
+ err = timeout_enable(0);
+
+ return err;
+}
+device_initcall(timelimit_init);
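The warning/expiry arithmetic in timeout_enable() is easy to misread: the warning timer fires first, at min(limit/2, 600) seconds, and timelimit_expire() then re-arms the same timer with the remainder, so the final expiry still lands at the full configured limit. A standalone sketch of that arithmetic (illustration only, not part of the patch; CONFIG_UPTIME_LIMIT_DURATION=30 is an assumed value):

#include <stdio.h>

/* assumed value for the example; the real one comes from Kconfig */
#define CONFIG_UPTIME_LIMIT_DURATION 30
#define UPTIME_LIMIT_IN_SECONDS (CONFIG_UPTIME_LIMIT_DURATION * 60)
#define MIN(X, Y) ((X) <= (Y) ? (X) : (Y))
#define TEN_MINUTES_IN_SECONDS 600

int main(void)
{
	int warning_limit = MIN(UPTIME_LIMIT_IN_SECONDS / 2, TEN_MINUTES_IN_SECONDS);

	/* 30 minutes configured: warning at 600s, final expiry 1200s later */
	printf("warning timer fires after %d seconds\n", warning_limit);
	printf("final timer fires %d seconds after that\n",
	       UPTIME_LIMIT_IN_SECONDS - warning_limit);
	return 0;
}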
diff --git a/mm/Makefile b/mm/Makefile
index d210cc9d6f80..e77e80ce7298 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y)
+ prfile.o debug.o $(mmu-y)
obj-y += init-mm.o
obj-y += memblock.o
diff --git a/mm/filemap.c b/mm/filemap.c
index 9f5e323e883e..90db684a7cf4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2677,7 +2677,7 @@ vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
vm_fault_t ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
- file_update_time(vmf->vma->vm_file);
+ vma_file_update_time(vmf->vma);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
diff --git a/mm/mmap.c b/mm/mmap.c
index da9236a5022e..d55729a0fb87 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -181,7 +181,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
- fput(vma->vm_file);
+ vma_fput(vma);
mpol_put(vma_policy(vma));
vm_area_free(vma);
return next;
@@ -930,7 +930,7 @@ again:
if (remove_next) {
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
- fput(file);
+ vma_fput(vma);
}
if (next->anon_vma)
anon_vma_merge(vma, next);
@@ -1846,8 +1846,8 @@ out:
return addr;
unmap_and_free_vma:
+ vma_fput(vma);
vma->vm_file = NULL;
- fput(file);
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
@@ -2680,7 +2680,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_free_mpol;
if (new->vm_file)
- get_file(new->vm_file);
+ vma_get_file(new);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
@@ -2699,7 +2699,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (new->vm_ops && new->vm_ops->close)
new->vm_ops->close(new);
if (new->vm_file)
- fput(new->vm_file);
+ vma_fput(new);
unlink_anon_vmas(new);
out_free_mpol:
mpol_put(vma_policy(new));
@@ -2889,7 +2889,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
struct vm_area_struct *vma;
unsigned long populate = 0;
unsigned long ret = -EINVAL;
- struct file *file;
+ struct file *file, *prfile;
pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
current->comm, current->pid);
@@ -2964,10 +2964,27 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
}
}
- file = get_file(vma->vm_file);
+ vma_get_file(vma);
+ file = vma->vm_file;
+ prfile = vma->vm_prfile;
ret = do_mmap_pgoff(vma->vm_file, start, size,
prot, flags, pgoff, &populate, NULL);
+ if (!IS_ERR_VALUE(ret) && file && prfile) {
+ struct vm_area_struct *new_vma;
+
+ new_vma = find_vma(mm, ret);
+ if (!new_vma->vm_prfile)
+ new_vma->vm_prfile = prfile;
+ if (new_vma != vma)
+ get_file(prfile);
+ }
+ /*
+ * two fput()s instead of vma_fput(vma),
+ * coz vma may not be available anymore.
+ */
fput(file);
+ if (prfile)
+ fput(prfile);
out:
up_write(&mm->mmap_sem);
if (populate)
@@ -3257,7 +3274,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
if (new_vma->vm_file)
- get_file(new_vma->vm_file);
+ vma_get_file(new_vma);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);
diff --git a/mm/nommu.c b/mm/nommu.c
index 749276beb109..d56f8f2bbd24 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -625,7 +625,7 @@ static void __put_nommu_region(struct vm_region *region)
up_write(&nommu_region_sem);
if (region->vm_file)
- fput(region->vm_file);
+ vmr_fput(region);
/* IO memory and memory shared directly out of the pagecache
* from ramfs/tmpfs mustn't be released here */
@@ -763,7 +763,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
- fput(vma->vm_file);
+ vma_fput(vma);
put_nommu_region(vma->vm_region);
vm_area_free(vma);
}
@@ -1286,7 +1286,7 @@ unsigned long do_mmap(struct file *file,
goto error_just_free;
}
}
- fput(region->vm_file);
+ vmr_fput(region);
kmem_cache_free(vm_region_jar, region);
region = pregion;
result = start;
@@ -1361,7 +1361,7 @@ error_just_free:
up_write(&nommu_region_sem);
error:
if (region->vm_file)
- fput(region->vm_file);
+ vmr_fput(region);
kmem_cache_free(vm_region_jar, region);
if (vma->vm_file)
fput(vma->vm_file);
diff --git a/mm/prfile.c b/mm/prfile.c
new file mode 100644
index 000000000000..024cdcfae1b1
--- /dev/null
+++ b/mm/prfile.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mainly for aufs which mmap(2) different file and wants to print different
+ * path in /proc/PID/maps.
+ * Call these functions via macros defined in linux/mm.h.
+ *
+ * See Documentation/filesystems/aufs/design/06mmap.txt
+ *
+ * Copyright (c) 2014-2019 Junjiro R. Okajima
+ * Copyright (c) 2014 Ian Campbell
+ */
+
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+
+/* #define PRFILE_TRACE */
+static inline void prfile_trace(struct file *f, struct file *pr,
+ const char func[], int line, const char func2[])
+{
+#ifdef PRFILE_TRACE
+ if (pr)
+ pr_info("%s:%d: %s, %pD2\n", func, line, func2, f);
+#endif
+}
+
+void vma_do_file_update_time(struct vm_area_struct *vma, const char func[],
+ int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ file_update_time(f);
+ if (f && pr)
+ file_update_time(pr);
+}
+
+struct file *vma_do_pr_or_file(struct vm_area_struct *vma, const char func[],
+ int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ return (f && pr) ? pr : f;
+}
+
+void vma_do_get_file(struct vm_area_struct *vma, const char func[], int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ get_file(f);
+ if (f && pr)
+ get_file(pr);
+}
+
+void vma_do_fput(struct vm_area_struct *vma, const char func[], int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ fput(f);
+ if (f && pr)
+ fput(pr);
+}
+
+#ifndef CONFIG_MMU
+struct file *vmr_do_pr_or_file(struct vm_region *region, const char func[],
+ int line)
+{
+ struct file *f = region->vm_file, *pr = region->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ return (f && pr) ? pr : f;
+}
+
+void vmr_do_fput(struct vm_region *region, const char func[], int line)
+{
+ struct file *f = region->vm_file, *pr = region->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ fput(f);
+ if (f && pr)
+ fput(pr);
+}
+#endif /* !CONFIG_MMU */
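For context, the vma_fput(), vma_get_file() and vma_file_update_time() calls introduced in the mm/filemap.c and mm/mmap.c hunks above end up in these vma_do_*() helpers. The include/linux/mm.h hunk is not shown in this section; per the comment at the top of prfile.c it defines thin wrapper macros, presumably along these lines (a sketch, not the literal hunk), so the optional PRFILE_TRACE output can report the call site:

#define vma_file_update_time(vma)	vma_do_file_update_time(vma, __func__, __LINE__)
#define vma_pr_or_file(vma)		vma_do_pr_or_file(vma, __func__, __LINE__)
#define vma_get_file(vma)		vma_do_get_file(vma, __func__, __LINE__)
#define vma_fput(vma)			vma_do_fput(vma, __func__, __LINE__)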
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 850a6f13a082..f2d56f9e8303 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -899,7 +899,14 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
agents are active. Taking the first reply prevents
arp trashing and chooses the fastest router.
*/
- override = time_after(jiffies,
+ /*
+ * If n->updated is after jiffies, then the clock has wrapped and
+ * we are *well* past the locktime, so set the override flag
+ */
+ if (time_after(n->updated, jiffies))
+ override = 1;
+ else
+ override = time_after(jiffies,
n->updated +
NEIGH_VAR(n->parms, LOCKTIME)) ||
is_garp;
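The guard added above is easiest to see with concrete numbers. time_after() compares via a signed subtraction, so once n->updated lies more than half the jiffies range in the past, the plain "jiffies is after updated + locktime" test flips to false even though an enormous interval has elapsed; in that situation updated itself appears to lie in the future, which is what the new check detects. A standalone sketch (illustration only, using the time_after() definition from include/linux/jiffies.h):

#include <stdio.h>
#include <limits.h>

#define time_after(a, b) ((long)((b) - (a)) < 0)	/* as in include/linux/jiffies.h */

int main(void)
{
	unsigned long locktime = 100;
	unsigned long updated = 1000;
	/* more than half the counter range has passed since the update */
	unsigned long jiffies = updated + ULONG_MAX / 2 + 5000;

	/* the plain locktime test now wrongly reports "not yet expired" */
	printf("time_after(jiffies, updated + locktime) = %d\n",
	       time_after(jiffies, updated + locktime));
	/* while the added guard fires: updated appears to be in the future */
	printf("time_after(updated, jiffies) = %d\n",
	       time_after(updated, jiffies));
	return 0;
}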
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e258a00b4a3d..5f335e237f6b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2561,28 +2561,23 @@ static __net_init int devinet_init_net(struct net *net)
#endif
err = -ENOMEM;
- all = &ipv4_devconf;
- dflt = &ipv4_devconf_dflt;
+ all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
+ if (all == NULL)
+ goto err_alloc_all;
- if (!net_eq(net, &init_net)) {
- all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
- if (!all)
- goto err_alloc_all;
-
- dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
- if (!dflt)
- goto err_alloc_dflt;
+ dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
+ if (dflt == NULL)
+ goto err_alloc_dflt;
#ifdef CONFIG_SYSCTL
- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
- if (!tbl)
- goto err_alloc_ctl;
+ tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_alloc_ctl;
- tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
- tbl[0].extra1 = all;
- tbl[0].extra2 = net;
+ tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
+ tbl[0].extra1 = all;
+ tbl[0].extra2 = net;
#endif
- }
#ifdef CONFIG_SYSCTL
err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh
index c812872d7f9d..65a9b9e5b8a6 100755..100644
--- a/scripts/kconfig/mconf-cfg.sh
+++ b/scripts/kconfig/mconf-cfg.sh
@@ -4,6 +4,14 @@
PKG="ncursesw"
PKG2="ncurses"
+if [ "$CROSS_CURSES_LIB" != "" ]; then
+ echo libs=\'$CROSS_CURSES_LIB\'
+ if [ x"$CROSS_CURSES_INC" != x ]; then
+ echo cflags=\'$CROSS_CURSES_INC\'
+ fi
+ exit 0
+fi
+
if [ -n "$(command -v pkg-config)" ]; then
if pkg-config --exists $PKG; then
echo cflags=\"$(pkg-config --cflags $PKG)\"
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 588a3bc29ecc..01b466894cb7 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -383,9 +383,10 @@ failed:
* spaces in the beginning of the line is trimmed away.
* Return a pointer to a static buffer.
**/
+#define MODPOST_MAX_LINE 32768
char *get_next_line(unsigned long *pos, void *file, unsigned long size)
{
- static char line[4096];
+ static char line[MODPOST_MAX_LINE];
int skip = 1;
size_t len = 0;
signed char *p = (signed char *)file + *pos;
@@ -400,8 +401,11 @@ char *get_next_line(unsigned long *pos, void *file, unsigned long size)
if (*p != '\n' && (*pos < size)) {
len++;
*s++ = *p++;
- if (len > 4095)
+ if (len > (sizeof(line)-1)) {
+ warn(" %s: line exceeds buffer size %zu bytes\n"
+ , __func__, sizeof(line));
break; /* Too long, stop */
+ }
} else {
/* End of string */
*s = '\0';
@@ -1949,7 +1953,7 @@ static void read_symbols(const char *modname)
char *version;
char *license;
struct module *mod;
- struct elf_info info = { };
+ struct elf_info info = { .hdr = NULL };
Elf_Sym *sym;
if (!parse_elf(&info, modname))
@@ -2398,7 +2402,7 @@ static int dump_sym(struct symbol *sym)
static void write_dump(const char *fname)
{
- struct buffer buf = { };
+ struct buffer buf = { NULL, 0, 0 };
struct symbol *symbol;
int n;
@@ -2425,7 +2429,7 @@ struct ext_sym_list {
int main(int argc, char **argv)
{
struct module *mod;
- struct buffer buf = { };
+ struct buffer buf = { NULL, 0, 0 };
char *kernel_read = NULL, *module_read = NULL;
char *dump_write = NULL, *files_source = NULL;
int opt;
diff --git a/security/commoncap.c b/security/commoncap.c
index 232db019f051..a402a5b72bf4 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -1332,12 +1332,14 @@ int cap_mmap_addr(unsigned long addr)
}
return ret;
}
+EXPORT_SYMBOL_GPL(cap_mmap_addr);
int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
{
return 0;
}
+EXPORT_SYMBOL_GPL(cap_mmap_file);
#ifdef CONFIG_SECURITY
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index dc28914fa72e..88b46b6737df 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -8,6 +8,7 @@
#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
@@ -824,3 +825,4 @@ int __devcgroup_check_permission(short type, u32 major, u32 minor,
return 0;
}
+EXPORT_SYMBOL_GPL(__devcgroup_check_permission);
diff --git a/security/security.c b/security/security.c
index 55bc49027ba9..870eaa8ebedc 100644
--- a/security/security.c
+++ b/security/security.c
@@ -566,6 +566,7 @@ int security_path_rmdir(const struct path *dir, struct dentry *dentry)
return 0;
return call_int_hook(path_rmdir, 0, dir, dentry);
}
+EXPORT_SYMBOL_GPL(security_path_rmdir);
int security_path_unlink(const struct path *dir, struct dentry *dentry)
{
@@ -582,6 +583,7 @@ int security_path_symlink(const struct path *dir, struct dentry *dentry,
return 0;
return call_int_hook(path_symlink, 0, dir, dentry, old_name);
}
+EXPORT_SYMBOL_GPL(security_path_symlink);
int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry)
@@ -590,6 +592,7 @@ int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
return 0;
return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
}
+EXPORT_SYMBOL_GPL(security_path_link);
int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry,
@@ -617,6 +620,7 @@ int security_path_truncate(const struct path *path)
return 0;
return call_int_hook(path_truncate, 0, path);
}
+EXPORT_SYMBOL_GPL(security_path_truncate);
int security_path_chmod(const struct path *path, umode_t mode)
{
@@ -624,6 +628,7 @@ int security_path_chmod(const struct path *path, umode_t mode)
return 0;
return call_int_hook(path_chmod, 0, path, mode);
}
+EXPORT_SYMBOL_GPL(security_path_chmod);
int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
@@ -631,6 +636,7 @@ int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
return 0;
return call_int_hook(path_chown, 0, path, uid, gid);
}
+EXPORT_SYMBOL_GPL(security_path_chown);
int security_path_chroot(const struct path *path)
{
@@ -716,6 +722,7 @@ int security_inode_readlink(struct dentry *dentry)
return 0;
return call_int_hook(inode_readlink, 0, dentry);
}
+EXPORT_SYMBOL_GPL(security_inode_readlink);
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu)
@@ -731,6 +738,7 @@ int security_inode_permission(struct inode *inode, int mask)
return 0;
return call_int_hook(inode_permission, 0, inode, mask);
}
+EXPORT_SYMBOL_GPL(security_inode_permission);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
@@ -902,6 +910,7 @@ int security_file_permission(struct file *file, int mask)
return fsnotify_perm(file, mask);
}
+EXPORT_SYMBOL_GPL(security_file_permission);
int security_file_alloc(struct file *file)
{
@@ -961,6 +970,7 @@ int security_mmap_file(struct file *file, unsigned long prot,
return ret;
return ima_file_mmap(file, prot);
}
+EXPORT_SYMBOL_GPL(security_mmap_file);
int security_mmap_addr(unsigned long addr)
{
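The EXPORT_SYMBOL_GPL additions in security/commoncap.c, security/device_cgroup.c and security/security.c make these path- and inode-based hooks callable from a loadable module, which a stacking filesystem such as aufs needs because it re-drives VFS operations on its branches. A minimal hypothetical sketch of such a caller (the helper name is invented for illustration):

#include <linux/module.h>
#include <linux/path.h>
#include <linux/security.h>

MODULE_LICENSE("GPL");	/* required: the exports above are GPL-only */

/* return 0 only if every active LSM permits truncating this path */
static int example_may_truncate(const struct path *path)
{
	return security_path_truncate(path);
}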
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2ec91085fa3e..789308f54785 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1788,9 +1788,6 @@ static int azx_first_init(struct azx *chip)
chip->msi = 0;
}
- if (azx_acquire_irq(chip, 0) < 0)
- return -EBUSY;
-
pci_set_master(pci);
synchronize_irq(bus->irq);
@@ -1904,6 +1901,9 @@ static int azx_first_init(struct azx *chip)
return -ENODEV;
}
+ if (azx_acquire_irq(chip, 0) < 0)
+ return -EBUSY;
+
strcpy(card->driver, "HDA-Intel");
strlcpy(card->shortname, driver_short_names[chip->driver_type],
sizeof(card->shortname));
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 1827c2f973f9..2868ec890b37 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -38,6 +38,7 @@
#define __always_inline inline
#endif
+#undef __user
#define __user
#define __rcu
#define __read_mostly
diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
index b02a36b2c14f..9166a071484c 100644
--- a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
@@ -1,4 +1,4 @@
-#!/bin/awk -f
+#!/usr/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
# gen-insn-attr-x86.awk: Instruction attribute table generator
# Written by Masami Hiramatsu <mhiramat@redhat.com>
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index cf4a8329c4c0..ab44fafde931 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -206,10 +206,10 @@ strip-libs = $(filter-out -l%,$(1))
PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
ifdef PYTHON_CONFIG
- PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+ PYTHON_EMBED_LDOPTS := $(shell pkg-config --libs python 2>/dev/null)
PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
- PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
+ PYTHON_EMBED_CCOPTS := $(shell pkg-config --cflags python 2>/dev/null)
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
endif
@@ -617,8 +617,9 @@ ifndef NO_SLANG
msg := $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev);
NO_SLANG := 1
else
- # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
- CFLAGS += -I/usr/include/slang
+ # Some releases like Fedora have /usr/include/slang/slang.h instead of /usr/include/slang.h
+ SLANG_INC ?= -idirafter =/usr/include/slang
+ CFLAGS += $(SLANG_INC)
CFLAGS += -DHAVE_SLANG_SUPPORT
EXTLIBS += -lslang
$(call detected,CONFIG_SLANG)
@@ -653,6 +654,7 @@ else
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
ifneq ($(feature-libperl), 1)
+ msg := $(warning libperl not found, disables Perl scripting support. Please install libperl-dev or perl-devel);
CFLAGS += -DNO_LIBPERL
NO_LIBPERL := 1
msg := $(warning Missing perl devel files. Disabling perl scripting support, please install perl-ExtUtils-Embed/libperl-dev);
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 77f8f069f1e7..e509fd4f5689 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -890,7 +890,7 @@ install-bin: install-tools install-tests install-traceevent-plugins
install: install-bin try-install-man
install-python_ext:
- $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
+ $(PYTHON_WORD) util/setup.py --quiet install --prefix='$(DESTDIR_SQ)/usr'
# 'make install-doc' should call 'make -C Documentation install'
$(INSTALL_DOC_TARGETS):
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk b/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
index a21454835cd4..2f5e8c99f372 100644
--- a/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
+++ b/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
@@ -1,4 +1,4 @@
-#!/bin/awk -f
+#!/usr/bin/awk -f
# gen-insn-x86-dat.awk: script to convert data for the insn-x86 test
# Copyright (c) 2015, Intel Corporation.
#
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 44195514b19e..1c81b9d4de71 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -27,6 +27,7 @@
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
+#include <inttypes.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
@@ -1188,7 +1189,7 @@ static void *worker_thread(void *__tdata)
/* Check whether our max runtime timed out: */
if (g->p.nr_secs) {
timersub(&stop, &start0, &diff);
- if ((u32)diff.tv_sec >= g->p.nr_secs) {
+ if ((u32)diff.tv_sec >= (long int)g->p.nr_secs) {
g->stop_work = true;
break;
}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 388c6dd128b8..f5e880143823 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -14,6 +14,8 @@ void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
int fd, int group_fd, unsigned long flags);
+#include <stdbool.h>
+
#define HAVE_ATTR_TEST
#include "perf-sys.h"
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
index 1a0d27757eec..5795bca4f04a 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -19,6 +19,10 @@
*
*/
+#ifdef __mips__
+#include <sgidefs.h>
+#endif
+
#include <Python.h>
#include "../../../perf.h"
#include "../../../util/trace-event.h"
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index d8426547219b..16b4dfd619df 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -73,7 +73,7 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
return 0;
snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir,
- attr->type, attr->config, fd);
+ attr->type, (unsigned long long)attr->config, fd);
file = fopen(path, "w+");
if (!file) {
@@ -82,7 +82,7 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
}
if (fprintf(file, "[event-%d-%llu-%d]\n",
- attr->type, attr->config, fd) < 0) {
+ attr->type, (unsigned long long)attr->config, fd) < 0) {
perror("test attr - failed to write event file");
fclose(file);
return -1;
@@ -98,10 +98,10 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
/* struct perf_event_attr */
WRITE_ASS(type, PRIu32);
WRITE_ASS(size, PRIu32);
- WRITE_ASS(config, "llu");
- WRITE_ASS(sample_period, "llu");
- WRITE_ASS(sample_type, "llu");
- WRITE_ASS(read_format, "llu");
+ __WRITE_ASS(config, "llu", (unsigned long long)attr->config);
+ __WRITE_ASS(sample_period, "llu", (unsigned long long)attr->sample_period);
+ __WRITE_ASS(sample_type, "llu", (unsigned long long)attr->sample_type);
+ __WRITE_ASS(read_format, "llu", (unsigned long long)attr->read_format);
WRITE_ASS(disabled, "d");
WRITE_ASS(inherit, "d");
WRITE_ASS(pinned, "d");
@@ -132,10 +132,10 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
WRITE_ASS(use_clockid, "d");
WRITE_ASS(wakeup_events, PRIu32);
WRITE_ASS(bp_type, PRIu32);
- WRITE_ASS(config1, "llu");
- WRITE_ASS(config2, "llu");
- WRITE_ASS(branch_sample_type, "llu");
- WRITE_ASS(sample_regs_user, "llu");
+ __WRITE_ASS(config1, "llu", (unsigned long long)attr->config1);
+ __WRITE_ASS(config2, "llu", (unsigned long long)attr->config2);
+ __WRITE_ASS(branch_sample_type, "llu", (unsigned long long)attr->branch_sample_type);
+ __WRITE_ASS(sample_regs_user, "llu", (unsigned long long)attr->sample_regs_user);
WRITE_ASS(sample_stack_user, PRIu32);
fclose(file);
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 910e25e64188..caedb5ec1523 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -128,7 +128,7 @@ static int __event(bool is_x, void *addr, int sig)
fd = sys_perf_event_open(&pe, 0, -1, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
- pr_debug("failed opening event %llx\n", pe.config);
+ pr_debug("failed opening event %llx\n", (unsigned long long)pe.config);
return TEST_FAIL;
}
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index ca962559e845..35e1de86b295 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -95,7 +95,7 @@ int test__bp_signal_overflow(struct test *test __maybe_unused, int subtest __may
fd = sys_perf_event_open(&pe, 0, -1, -1,
perf_event_open_cloexec_flag());
if (fd < 0) {
- pr_debug("failed opening event %llx\n", pe.config);
+ pr_debug("failed opening event %llx\n", (unsigned long long)pe.config);
return TEST_FAIL;
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 9142fd294e76..bdd79184ebd3 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1723,7 +1723,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand",
+ " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%1:\"|sed 's/\t/ /g'",
opts->objdump_path ?: "objdump",
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
diff --git a/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk b/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk
index ddd5c4c21129..d236058fc4e2 100644
--- a/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk
+++ b/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk
@@ -1,4 +1,4 @@
-#!/bin/awk -f
+#!/usr/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
# gen-insn-attr-x86.awk: Instruction attribute table generator
# Written by Masami Hiramatsu <mhiramat@redhat.com>
diff --git a/tools/perf/util/libunwind/x86_32.c b/tools/perf/util/libunwind/x86_32.c
index c5e568188e19..d493f96b806d 100644
--- a/tools/perf/util/libunwind/x86_32.c
+++ b/tools/perf/util/libunwind/x86_32.c
@@ -21,6 +21,7 @@
#define LIBUNWIND__ARCH_REG_IP PERF_REG_X86_IP
#define LIBUNWIND__ARCH_REG_SP PERF_REG_X86_SP
+#include <errno.h>
#include "unwind.h"
#include "debug.h"
#include "libunwind-x86.h"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 7059d1be2d09..3edb0152026b 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -19,6 +19,10 @@
*
*/
+#ifdef __mips__
+#include <sgidefs.h>
+#endif
+
#include <Python.h>
#include <inttypes.h>
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
index e05182d3e47d..6798ab45032d 100755..100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
@@ -1,4 +1,4 @@
-#!/usr/bin/awk -f
+#!/usr/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
# Modify SRCU for formal verification. The first argument should be srcu.h and
diff --git a/usr/Makefile b/usr/Makefile
index 4a70ae43c9cb..c49985b27ac8 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -24,7 +24,7 @@ $(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE
# Generate the initramfs cpio archive
hostprogs-y := gen_init_cpio
-initramfs := $(CONFIG_SHELL) $(srctree)/$(src)/gen_initramfs_list.sh
+initramfs := $(INITRAMFS_WRAPPER) $(CONFIG_SHELL) $(srctree)/$(src)/gen_initramfs_list.sh
ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
$(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d)
ramfs-args := \