/* Default implementations and global values for the VFS. */
#include <vfs.h> // keep this first
+#include <ros/errno.h>
#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>
#include <pmap.h>
#include <umem.h>
#include <smp.h>
+#include <ns.h>
+#include <fdtap.h>
struct sb_tailq super_blocks = TAILQ_HEAD_INITIALIZER(super_blocks);
spinlock_t super_blocks_lock = SPINLOCK_INITIALIZER;
struct kmem_cache *inode_kcache;
struct kmem_cache *file_kcache;
+enum {
+ VFS_MTIME,
+ VFS_CTIME,
+ VFS_ATIME,
+};
+
+/* mtime implies ctime implies atime. */
+static void set_acmtime(struct inode *inode, int which)
+{
+ struct timespec now = nsec2timespec(epoch_nsec());
+
+ switch (which) {
+ case VFS_MTIME:
+ inode->i_mtime.tv_sec = now.tv_sec;
+ inode->i_mtime.tv_nsec = now.tv_nsec;
+ /* fall through */
+ case VFS_CTIME:
+ inode->i_ctime.tv_sec = now.tv_sec;
+ inode->i_ctime.tv_nsec = now.tv_nsec;
+ /* fall through */
+ case VFS_ATIME:
+ inode->i_atime.tv_sec = now.tv_sec;
+ inode->i_atime.tv_nsec = now.tv_nsec;
+ }
+}
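+
+/* e.g., a successful write bumps all three timestamps:
+ *	set_acmtime(inode, VFS_MTIME);
+ * while a plain read bumps only the access time (see the file read/write
+ * paths below for the actual call sites):
+ *	set_acmtime(inode, VFS_ATIME); */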
+
/* Mounts fs from dev_name at mnt_pt in namespace ns. There could be no mnt_pt,
* such as with the root of (the default) namespace. Not sure how it would work
* with multiple namespaces on the same FS yet. Note if you mount the same FS
struct fs_type *fs;
dentry_kcache = kmem_cache_create("dentry", sizeof(struct dentry),
- __alignof__(struct dentry), 0, 0, 0);
+ __alignof__(struct dentry), 0,
+ NULL, 0, 0, NULL);
inode_kcache = kmem_cache_create("inode", sizeof(struct inode),
- __alignof__(struct inode), 0, 0, 0);
+ __alignof__(struct inode), 0, NULL,
+ 0, 0, NULL);
file_kcache = kmem_cache_create("file", sizeof(struct file),
- __alignof__(struct file), 0, 0, 0);
+ __alignof__(struct file), 0, NULL, 0,
+ 0, NULL);
/* default NS never dies, +1 to exist */
kref_init(&default_ns.kref, fake_release, 1);
spinlock_init(&default_ns.lock);
printk("vfs_init() completed\n");
}
+/* FSes can provide their own, if they want; this is the default d_hash. */
+int generic_dentry_hash(struct dentry *dentry, struct qstr *qstr)
+{
+ unsigned long hash = 5381;
+
+ for (int i = 0; i < qstr->len; i++) {
+ /* hash * 33 + c, djb2's technique */
+ hash = ((hash << 5) + hash) + qstr->name[i];
+ }
+ return hash;
+}
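+
+/* A FS that is fine with the default can point its dentry ops at this; a
+ * hypothetical wiring sketch (qstr_builder() below calls d_op->d_hash):
+ *
+ *	struct dentry_operations myfs_d_op = { .d_hash = generic_dentry_hash };
+ */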
+
/* Builds / populates the qstr of a dentry based on its d_iname. If there is an
 * l_name (a long name), it will use that instead of the inline name. This will
* probably change a bit. */
void qstr_builder(struct dentry *dentry, char *l_name)
{
dentry->d_name.name = l_name ? l_name : dentry->d_iname;
- // TODO: pending what we actually do in d_hash
- //dentry->d_name.hash = dentry->d_op->d_hash(dentry, &dentry->d_name);
- dentry->d_name.hash = 0xcafebabe;
dentry->d_name.len = strnlen(dentry->d_name.name, MAX_FILENAME_SZ);
+ dentry->d_name.hash = dentry->d_op->d_hash(dentry, &dentry->d_name);
}
/* Useful little helper - return the string ptr for a given file */
return file->f_dentry->d_name.name;
}
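+
+/* Helper for dentry_path() below: builds a string back-to-front.  Moves *pbuf
+ * back by len and copies str there, shrinking the remaining budget *pbuflen.
+ * Fails with -ENAMETOOLONG when the budget runs out. */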
+static int prepend(char **pbuf, size_t *pbuflen, const char *str, size_t len)
+{
+ if (*pbuflen < len)
+ return -ENAMETOOLONG;
+ *pbuflen -= len;
+ *pbuf -= len;
+ memcpy(*pbuf, str, len);
+
+ return 0;
+}
+
+char *dentry_path(struct dentry *dentry, char *path, size_t max_size)
+{
+ size_t csize = max_size;
+ char *path_start = path + max_size, *base;
+
+ if (prepend(&path_start, &csize, "\0", 1) < 0 || csize < 1)
+ return NULL;
+ /* Handle the case that the passed dentry is the root. */
+ base = path_start - 1;
+ *base = '/';
+ while (!DENTRY_IS_ROOT(dentry)) {
+ if (prepend(&path_start, &csize, dentry->d_name.name,
+ dentry->d_name.len) < 0 ||
+ prepend(&path_start, &csize, "/", 1) < 0)
+ return NULL;
+ base = path_start;
+ dentry = dentry->d_parent;
+ }
+
+ return base;
+}
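+
+/* Hypothetical usage sketch: the returned pointer is into 'path' (not
+ * necessarily its start), or NULL if max_size couldn't hold the full path:
+ *
+ *	char buf[128];
+ *	char *p = dentry_path(dentry, buf, sizeof(buf));
+ *
+ *	if (p)
+ *		printk("path: %s\n", p);
+ */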
+
/* Some issues with this, coupled closely to fs_lookup.
*
* Note the use of __dentry_free, instead of kref_put. In those cases, we don't
warn("OOM in do_lookup(), probably wasn't expected\n");
return 0;
}
- result = dcache_get(parent->d_sb, query);
+ result = dcache_get(parent->d_sb, query);
if (result) {
__dentry_free(query);
return result;
/* TODO: if the following are done by us, how do we know the i_ino?
* also need to handle inodes that are already read in! For now, we're
- * going to have the FS handle it in it's lookup() method:
+ * going to have the FS handle it in its lookup() method:
* - get a new inode
* - read in the inode
* - put in the inode cache */
nd->depth++;
symname = nd->dentry->d_inode->i_op->readlink(nd->dentry);
/* We need to pin in nd->dentry (the dentry of the symlink), since we need
- * it's symname's storage to stay in memory throughout the upcoming
+ * its symname's storage to stay in memory throughout the upcoming
* link_path_walk(). The last_sym gets decreffed when we path_release() or
* follow another symlink. */
if (nd->last_sym)
if (!current)
nd->dentry = default_ns.root->mnt_root;
else
- nd->dentry = current->fs_env.root;
+ nd->dentry = current->fs_env.root;
nd->mnt = nd->dentry->d_sb->s_mount;
kref_get(&nd->mnt->mnt_kref, 1);
kref_get(&nd->dentry->d_kref, 1);
return FALSE;
}
-/* Simple helper to set nd to track it's last name to be Name. Also be careful
+/* Simple helper to set nd to track its last name to be Name. Also be careful
 * with the storage of name. Don't use an nd's name past the lifetime of the
* string used in the path_lookup()/link_path_walk/whatever. Consider replacing
* parts of this with a qstr builder. Note this uses the dentry's d_op, which
}
/* Given path, return the inode for the final dentry. The ND should be
- * initialized for the first call - specifically, we need the intent.
+ * initialized for the first call - specifically, we need the intent.
* LOOKUP_PARENT and friends go in the flags var, which is not the intent.
*
* If path_lookup wants a PARENT, but hits the top of the FS (root or
int retval;
printd("Path lookup for %s\n", path);
/* we allow absolute lookups with no process context */
+ /* TODO: RCU read lock on pwd or kref_not_zero in a loop. concurrent chdir
+ * could decref nd->dentry before we get to incref it below. */
if (path[0] == '/') { /* absolute lookup */
if (!current)
nd->dentry = default_ns.root->mnt_root;
else
- nd->dentry = current->fs_env.root;
+ nd->dentry = current->fs_env.root;
} else { /* relative lookup */
assert(current);
/* Don't need to lock on the fs_env since we're reading one item */
- nd->dentry = current->fs_env.pwd;
+ nd->dentry = current->fs_env.pwd;
}
nd->mnt = nd->dentry->d_sb->s_mount;
/* Whenever references get put in the nd, incref them. Whenever they are
kref_get(&nd->dentry->d_kref, 1);
nd->flags = flags;
nd->depth = 0; /* used in symlink following */
- retval = link_path_walk(path, nd);
+ retval = link_path_walk(path, nd);
/* make sure our PARENT lookup worked */
if (!retval && (flags & LOOKUP_PARENT))
assert(nd->last.name);
retval = path_lookup(path, LOOKUP_DIRECTORY, nd);
if (retval)
goto out;
- /* taking the namespace of the vfsmount of path */
+ /* taking the namespace of the vfsmount of path */
if (!__mount_fs(fs, dev_name, nd->dentry, flags, nd->mnt->mnt_namespace))
retval = -EINVAL;
out:
/* Helper to alloc and initialize a generic superblock. This handles all the
* VFS related things, like lists. Each FS will need to handle its own things
- * in it's *_get_sb(), usually involving reading off the disc. */
+ * in its *_get_sb(), usually involving reading off the disc. */
struct super_block *get_sb(void)
{
struct super_block *sb = kmalloc(sizeof(struct super_block), 0);
/* Final stages of initializing a super block, including creating and linking
* the root dentry, root inode, vmnt, and sb. The d_op and root_ino are
- * FS-specific, but otherwise it's FS-independent, tricky, and not worth having
+ * FS-specific, but otherwise it's FS-independent, tricky, and not worth having
* around multiple times.
*
* Not the world's best interface, so it's subject to change, esp since we're
{
/* Build and init the first dentry / inode. The dentry ref is stored later
* by vfsmount's mnt_root. The parent is dealt with later. */
- struct dentry *d_root = get_dentry(sb, 0, "/"); /* probably right */
+ struct dentry *d_root = get_dentry_with_ops(sb, 0, "/", d_op);
if (!d_root)
panic("OOM! init_sb() can't fail yet!");
/* Dentry Functions */
-/* Helper to alloc and initialize a generic dentry. The following needs to be
- * set still: d_op (if no parent), d_fs_info (opt), d_inode, connect the inode
- * to the dentry (and up the d_kref again), maybe dcache_put(). The inode
- * stitching is done in get_inode() or lookup (depending on the FS).
- * The setting of the d_op might be problematic when dealing with mounts. Just
- * overwrite it.
- *
- * If the name is longer than the inline name, it will kmalloc a buffer, so
- * don't worry about the storage for *name after calling this. */
-struct dentry *get_dentry(struct super_block *sb, struct dentry *parent,
- char *name)
+static void dentry_set_name(struct dentry *dentry, char *name)
{
- assert(name);
size_t name_len = strnlen(name, MAX_FILENAME_SZ); /* not including \0! */
- struct dentry *dentry = kmem_cache_alloc(dentry_kcache, 0);
char *l_name = 0;
+ if (name_len < DNAME_INLINE_LEN) {
+ strlcpy(dentry->d_iname, name, name_len + 1);
+ qstr_builder(dentry, 0);
+ } else {
+ l_name = kmalloc(name_len + 1, 0);
+ assert(l_name);
+ strlcpy(l_name, name, name_len + 1);
+ qstr_builder(dentry, l_name);
+ }
+}
+
+/* Gets a dentry. If there is no parent, use d_op. Only called directly by
+ * superblock init code. */
+struct dentry *get_dentry_with_ops(struct super_block *sb,
+ struct dentry *parent, char *name,
+ struct dentry_operations *d_op)
+{
+ assert(name);
+ struct dentry *dentry = kmem_cache_alloc(dentry_kcache, 0);
if (!dentry) {
set_errno(ENOMEM);
if (parent) { /* no parent for rootfs mount */
kref_get(&parent->d_kref, 1);
dentry->d_op = parent->d_op; /* d_op set in init_sb for parentless */
+ } else {
+ dentry->d_op = d_op;
}
dentry->d_parent = parent;
dentry->d_flags = DENTRY_USED;
dentry->d_fs_info = 0;
- if (name_len < DNAME_INLINE_LEN) {
- strncpy(dentry->d_iname, name, name_len);
- dentry->d_iname[name_len] = '\0';
- qstr_builder(dentry, 0);
- } else {
- l_name = kmalloc(name_len + 1, 0);
- assert(l_name);
- strncpy(l_name, name, name_len);
- l_name[name_len] = '\0';
- qstr_builder(dentry, l_name);
- }
+ dentry_set_name(dentry, name);
/* Catch bugs by aggressively zeroing this (o/w we use old stuff) */
dentry->d_inode = 0;
return dentry;
}
+/* Helper to alloc and initialize a generic dentry. The following needs to be
+ * set still: d_op (if no parent), d_fs_info (opt), d_inode, connect the inode
+ * to the dentry (and up the d_kref again), maybe dcache_put(). The inode
+ * stitching is done in get_inode() or lookup (depending on the FS).
+ * The setting of the d_op might be problematic when dealing with mounts. Just
+ * overwrite it.
+ *
+ * If the name is longer than the inline name, it will kmalloc a buffer, so
+ * don't worry about the storage for *name after calling this. */
+struct dentry *get_dentry(struct super_block *sb, struct dentry *parent,
+ char *name)
+{
+ return get_dentry_with_ops(sb, parent, name, 0);
+}
+
/* Called when the dentry is unreferenced (after kref == 0). This works closely
* with the resurrection in dcache_get().
*
int retval;
spin_lock(&sb->s_dcache_lock);
old = hashtable_remove(sb->s_dcache, key_val);
- if (old) {
- assert(old->d_flags & DENTRY_NEGATIVE);
+ /* if it is old and non-negative, our caller lost a race with someone else
+ * adding the dentry. but since we yanked it out, like a bunch of idiots,
+ * we still have to put it back. should be fairly rare. */
+ if (old && (old->d_flags & DENTRY_NEGATIVE)) {
/* This is possible, but rare for now (about to be put on the LRU) */
assert(!(old->d_flags & DENTRY_USED));
assert(!kref_refcnt(&old->d_kref));
spin_lock(&sb->s_lru_lock);
TAILQ_REMOVE(&sb->s_lru_d, old, d_lru);
spin_unlock(&sb->s_lru_lock);
+ /* TODO: this seems suspect. isn't this the same memory as key_val?
+ * in which case, we just adjust the flags (remove NEG) and reinsert? */
+ assert(old != key_val); // checking TODO comment
__dentry_free(old);
}
/* this returns 0 on failure (TODO: Fix this ghetto shit) */
inode->i_nlink = 1;
inode->i_size = 0;
inode->i_blocks = 0;
- inode->i_atime.tv_sec = 0; /* TODO: now! */
- inode->i_ctime.tv_sec = 0;
- inode->i_mtime.tv_sec = 0;
- inode->i_atime.tv_nsec = 0; /* are these supposed to be the extra ns? */
- inode->i_ctime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
+ set_acmtime(inode, VFS_MTIME);
inode->i_bdev = inode->i_sb->s_bdev;
/* when we have notions of users, do something here: */
inode->i_uid = 0;
if (!new_file)
return -1;
dir->i_op->create(dir, dentry, mode, 0);
+ set_acmtime(dir, VFS_MTIME);
icache_put(new_file->i_sb, new_file);
kref_put(&new_file->i_kref);
return 0;
assert(parent && parent == TAILQ_LAST(&dir->i_dentry, dentry_tailq));
/* parent dentry tracks dentry as a subdir, weak reference */
TAILQ_INSERT_TAIL(&parent->d_subdirs, dentry, d_subdirs_link);
+ set_acmtime(dir, VFS_MTIME);
icache_put(new_dir->i_sb, new_dir);
kref_put(&new_dir->i_kref);
return 0;
if (!new_sym)
return -1;
dir->i_op->symlink(dir, dentry, symname);
+ set_acmtime(dir, VFS_MTIME);
icache_put(new_sym->i_sb, new_sym);
kref_put(&new_sym->i_kref);
return 0;
kstat->st_size = inode->i_size;
kstat->st_blksize = inode->i_blksize;
kstat->st_blocks = inode->i_blocks;
- kstat->st_atime = inode->i_atime;
- kstat->st_mtime = inode->i_mtime;
- kstat->st_ctime = inode->i_ctime;
+ kstat->st_atim = inode->i_atime;
+ kstat->st_mtim = inode->i_mtime;
+ kstat->st_ctim = inode->i_ctime;
}
void print_kstat(struct kstat *kstat)
printk("\tst_size : %p\n", kstat->st_size);
printk("\tst_blksize: %p\n", kstat->st_blksize);
printk("\tst_blocks : %p\n", kstat->st_blocks);
- printk("\tst_atime : %p\n", kstat->st_atime);
- printk("\tst_mtime : %p\n", kstat->st_mtime);
- printk("\tst_ctime : %p\n", kstat->st_ctime);
+ printk("\tst_atime : %p\n", kstat->st_atim);
+ printk("\tst_mtime : %p\n", kstat->st_mtim);
+ printk("\tst_ctime : %p\n", kstat->st_ctim);
}
/* Inode Cache management. In general, search on the ino, get a refcnt'd value
unsigned long first_idx, last_idx;
size_t copy_amt;
char *buf_end;
+ /* read in offset, in case of a concurrent reader/writer, so we don't screw
+ * up our math for count, the idxs, etc. */
+ off64_t orig_off = ACCESS_ONCE(*offset);
/* Consider pushing some error checking higher in the VFS */
if (!count)
return 0;
- if (*offset >= file->f_dentry->d_inode->i_size)
+ if (!(file->f_flags & O_READ)) {
+ set_errno(EBADF);
+ return 0;
+ }
+ if (orig_off >= file->f_dentry->d_inode->i_size)
return 0; /* EOF */
/* Make sure we don't go past the end of the file */
- if (*offset + count > file->f_dentry->d_inode->i_size) {
- count = file->f_dentry->d_inode->i_size - *offset;
+ if (orig_off + count > file->f_dentry->d_inode->i_size) {
+ count = file->f_dentry->d_inode->i_size - orig_off;
}
assert((long)count > 0);
- page_off = *offset & (PGSIZE - 1);
- first_idx = *offset >> PGSHIFT;
- last_idx = (*offset + count) >> PGSHIFT;
+ page_off = orig_off & (PGSIZE - 1);
+ first_idx = orig_off >> PGSHIFT;
+ last_idx = (orig_off + count) >> PGSHIFT;
buf_end = buf + count;
/* For each file page, make sure it's in the page cache, then copy it out.
* TODO: will probably need to consider concurrently truncated files here.*/
error = pm_load_page(file->f_mapping, i, &page);
assert(!error); /* TODO: handle ENOMEM and friends */
copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
- /* TODO: (UMEM) think about this. if it's a user buffer, we're relying
- * on current to detect whose it is (which should work for async calls).
- * Also, need to propagate errors properly... Probably should do a
- * user_mem_check, then free, and also to make a distinction between
- * when the kernel wants a read/write (TODO: KFOP) */
- if (current) {
+ /* TODO: (KFOP) Probably shouldn't do this. Either memcpy directly, or
+ * split out the is_user_r(w)addr from copy_{to,from}_user() */
+ if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
memcpy_to_user(current, buf, page2kva(page) + page_off, copy_amt);
- } else {
+ else
memcpy(buf, page2kva(page) + page_off, copy_amt);
- }
buf += copy_amt;
page_off = 0;
pm_put_page(page); /* it's still in the cache, we just don't need it */
}
assert(buf == buf_end);
- *offset += count;
+ /* could have concurrent file ops that screw with offset, so userspace isn't
+ * safe. but at least it'll be a value that one of the concurrent ops could
+	 * have produced (compared to *offset_changed_concurrently += count). */
+ *offset = orig_off + count;
+ set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
return count;
}
unsigned long first_idx, last_idx;
size_t copy_amt;
const char *buf_end;
+ off64_t orig_off = ACCESS_ONCE(*offset);
/* Consider pushing some error checking higher in the VFS */
if (!count)
return 0;
- /* Extend the file. Should put more checks in here, and maybe do this per
- * page in the for loop below. */
- if (*offset + count > file->f_dentry->d_inode->i_size)
- file->f_dentry->d_inode->i_size = *offset + count;
- page_off = *offset & (PGSIZE - 1);
- first_idx = *offset >> PGSHIFT;
- last_idx = (*offset + count) >> PGSHIFT;
+ if (!(file->f_flags & O_WRITE)) {
+ set_errno(EBADF);
+ return 0;
+ }
+ if (file->f_flags & O_APPEND) {
+ spin_lock(&file->f_dentry->d_inode->i_lock);
+ orig_off = file->f_dentry->d_inode->i_size;
+ /* setting the filesize here, instead of during the extend-check, since
+ * we need to atomically reserve space and set our write position. */
+ file->f_dentry->d_inode->i_size += count;
+ spin_unlock(&file->f_dentry->d_inode->i_lock);
+ } else {
+ if (orig_off + count > file->f_dentry->d_inode->i_size) {
+ /* lock for writes to i_size. we allow lockless reads. recheck
+ * i_size in case of concurrent writers since our orig check. */
+ spin_lock(&file->f_dentry->d_inode->i_lock);
+ if (orig_off + count > file->f_dentry->d_inode->i_size)
+ file->f_dentry->d_inode->i_size = orig_off + count;
+ spin_unlock(&file->f_dentry->d_inode->i_lock);
+ }
+ }
+ page_off = orig_off & (PGSIZE - 1);
+ first_idx = orig_off >> PGSHIFT;
+ last_idx = (orig_off + count) >> PGSHIFT;
buf_end = buf + count;
/* For each file page, make sure it's in the page cache, then write it.*/
for (int i = first_idx; i <= last_idx; i++) {
error = pm_load_page(file->f_mapping, i, &page);
assert(!error); /* TODO: handle ENOMEM and friends */
copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
- /* TODO: (UMEM) (KFOP) think about this. if it's a user buffer, we're
- * relying on current to detect whose it is (which should work for async
- * calls). */
- if (current) {
+ /* TODO: (UMEM) (KFOP) think about this. */
+ if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
memcpy_from_user(current, page2kva(page) + page_off, buf, copy_amt);
- } else {
+ else
memcpy(page2kva(page) + page_off, buf, copy_amt);
- }
buf += copy_amt;
page_off = 0;
atomic_or(&page->pg_flags, PG_DIRTY);
pm_put_page(page); /* it's still in the cache, we just don't need it */
}
assert(buf == buf_end);
- *offset += count;
+ *offset = orig_off + count;
+ set_acmtime(file->f_dentry->d_inode, VFS_MTIME);
return count;
}
}
if (!count)
return 0;
+ if (!(file->f_flags & O_READ)) {
+ set_errno(EBADF);
+ return 0;
+ }
/* start readdir from where it left off: */
dirent->d_off = *offset;
for ( ;
}
/* Slight info exposure: could be extra crap after the name in the
* dirent (like the name of a deleted file) */
- if (current) {
+ if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
memcpy_to_user(current, u_buf, dirent, sizeof(struct dirent));
- } else {
+ else
memcpy(u_buf, dirent, sizeof(struct dirent));
- }
amt_copied += sizeof(struct dirent);
/* 0 signals end of directory */
if (retval == 0)
/* Next time read is called, we pick up where we left off */
*offset = dirent->d_off; /* UMEM */
/* important to tell them how much they got. they often keep going til they
- * get 0 back (in the case of ls). it's also how much has been read, but it
+ * get 0 back (in the case of ls). It's also how much has been read, but it
* isn't how much the f_pos has moved (which is opaque to the VFS). */
+ set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
return amt_copied;
}
struct inode *parent_i;
struct nameidata nd_r = {0}, *nd = &nd_r;
int error;
+ unsigned long nr_pages;
/* The file might exist, lets try to just open it right away */
nd->intent = LOOKUP_OPEN;
error = path_lookup(path, LOOKUP_FOLLOW, nd);
if (!error) {
- /* Still need to make sure we didn't want to O_EXCL create */
+ if (S_ISDIR(nd->dentry->d_inode->i_mode) && (flags & O_WRITE)) {
+ set_errno(EISDIR);
+ goto out_path_only;
+ }
+ /* Also need to make sure we didn't want to O_EXCL create */
if ((flags & O_CREAT) && (flags & O_EXCL)) {
set_errno(EEXIST);
goto out_path_only;
kref_get(&file_d->d_kref, 1);
goto open_the_file;
}
+ if (!(flags & O_CREAT)) {
+ set_errno(-error);
+ goto out_path_only;
+ }
/* So it didn't already exist, release the path from the previous lookup,
* and then we try to create it. */
- path_release(nd);
+ path_release(nd);
/* get the parent, following links. this means you get the parent of the
	 * final link (which may not be in 'path' in the first place). */
nd->intent = LOOKUP_CREATE;
goto out_path_only;
}
/* see if the target is there (shouldn't be), and handle accordingly */
- file_d = do_lookup(nd->dentry, nd->last.name);
+ file_d = do_lookup(nd->dentry, nd->last.name);
if (!file_d) {
if (!(flags & O_CREAT)) {
+ warn("Extremely unlikely race, probably a bug");
set_errno(ENOENT);
goto out_path_only;
}
/* now open the file (freshly created or if it already existed). At this
* point, file_d is a refcnt'd dentry, regardless of which branch we took.*/
if (flags & O_TRUNC) {
+ spin_lock(&file_d->d_inode->i_lock);
+ nr_pages = ROUNDUP(file_d->d_inode->i_size, PGSIZE) >> PGSHIFT;
file_d->d_inode->i_size = 0;
- /* TODO: probably should remove the garbage pages from the page map */
+ spin_unlock(&file_d->d_inode->i_lock);
+ pm_remove_contig(file_d->d_inode->i_mapping, 0, nr_pages);
}
file = dentry_open(file_d, flags); /* sets errno */
/* Note the fall through to the exit paths. File is 0 by default and if
goto out_path_only;
}
/* see if the target is already there, handle accordingly */
- sym_d = do_lookup(nd->dentry, nd->last.name);
+ sym_d = do_lookup(nd->dentry, nd->last.name);
if (sym_d) {
set_errno(EEXIST);
goto out_sym_d;
parent_i = nd->dentry->d_inode;
if (create_symlink(parent_i, sym_d, symname, mode))
goto out_sym_d;
+ set_acmtime(parent_i, VFS_MTIME);
dcache_put(sym_d->d_sb, sym_d);
retval = 0; /* Note the fall through to the exit paths */
out_sym_d:
}
parent_dir = nd->dentry->d_inode;
/* see if the new target is already there, handle accordingly */
- link_d = do_lookup(nd->dentry, nd->last.name);
+ link_d = do_lookup(nd->dentry, nd->last.name);
if (link_d) {
set_errno(EEXIST);
goto out_link_d;
set_errno(-error);
goto out_both_ds;
}
+ set_acmtime(parent_dir, VFS_MTIME);
/* Finally stitch it up */
inode = old_d->d_inode;
kref_get(&inode->i_kref, 1);
}
parent_dir = nd->dentry->d_inode;
/* make sure the target is there */
- dentry = do_lookup(nd->dentry, nd->last.name);
+ dentry = do_lookup(nd->dentry, nd->last.name);
if (!dentry) {
set_errno(ENOENT);
goto out_path_only;
set_errno(-error);
goto out_dentry;
}
+ set_acmtime(parent_dir, VFS_MTIME);
/* Now that our parent doesn't track us, we need to make sure we aren't
* findable via the dentry cache. DYING, so we will be freed in
* dentry_release() */
int retval = 0;
nd->intent = LOOKUP_ACCESS;
retval = path_lookup(path, 0, nd);
- path_release(nd);
+ path_release(nd);
return retval;
}
else
#endif
file->f_dentry->d_inode->i_mode = (mode & S_PMASK) | old_mode_ftype;
+ set_acmtime(file->f_dentry->d_inode, VFS_CTIME);
return 0;
}
int error;
int retval = -1;
+ /* The dir might exist and might be /, so we can't look for the parent */
+ nd->intent = LOOKUP_OPEN;
+ error = path_lookup(path, LOOKUP_FOLLOW, nd);
+ path_release(nd);
+ if (!error) {
+ set_errno(EEXIST);
+ return -1;
+ }
nd->intent = LOOKUP_CREATE;
/* get the parent, but don't follow links */
error = path_lookup(path, LOOKUP_PARENT, nd);
set_errno(-error);
goto out_path_only;
}
- /* see if the target is already there, handle accordingly */
- dentry = do_lookup(nd->dentry, nd->last.name);
- if (dentry) {
- set_errno(EEXIST);
- goto out_dentry;
- }
/* Doesn't already exist, let's try to make it: */
dentry = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
if (!dentry)
parent_i = nd->dentry->d_inode;
if (create_dir(parent_i, dentry, mode))
goto out_dentry;
+ set_acmtime(parent_i, VFS_MTIME);
dcache_put(dentry->d_sb, dentry);
retval = 0; /* Note the fall through to the exit paths */
out_dentry:
goto out_path_only;
}
/* make sure the target is already there, handle accordingly */
- dentry = do_lookup(nd->dentry, nd->last.name);
+ dentry = do_lookup(nd->dentry, nd->last.name);
if (!dentry) {
set_errno(ENOENT);
goto out_path_only;
set_errno(-error);
goto out_dentry;
}
+ set_acmtime(parent_i, VFS_MTIME);
/* Now that our parent doesn't track us, we need to make sure we aren't
* findable via the dentry cache. DYING, so we will be freed in
* dentry_release() */
if (amt_copied)
__cv_broadcast(&pii->p_cv);
cv_unlock(&pii->p_cv);
+ set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
return amt_copied;
}
if (amt_copied)
__cv_broadcast(&pii->p_cv);
cv_unlock(&pii->p_cv);
+ set_acmtime(file->f_dentry->d_inode, VFS_MTIME);
return amt_copied;
}
/* Actually build the pipe. We're using one page, hanging off the
* pipe_inode_info struct. When we release the inode, we free the pipe
* memory too */
- pipe_i->i_pipe = kmalloc(sizeof(struct pipe_inode_info), KMALLOC_WAIT);
+ pipe_i->i_pipe = kmalloc(sizeof(struct pipe_inode_info), MEM_WAIT);
pii = pipe_i->i_pipe;
if (!pii) {
set_errno(ENOMEM);
return -1;
}
+int do_rename(char *old_path, char *new_path)
+{
+ struct nameidata nd_old = {0}, *nd_o = &nd_old;
+ struct nameidata nd_new = {0}, *nd_n = &nd_new;
+ struct dentry *old_dir_d, *new_dir_d;
+ struct inode *old_dir_i, *new_dir_i;
+ struct dentry *old_d, *new_d, *unlink_d;
+ int error;
+ int retval = 0;
+
+ nd_o->intent = LOOKUP_ACCESS; /* maybe, might need another type */
+
+ /* get the parent, but don't follow links */
+ error = path_lookup(old_path, LOOKUP_PARENT | LOOKUP_DIRECTORY, nd_o);
+ if (error) {
+ set_errno(-error);
+ retval = -1;
+ goto out_old_path;
+ }
+ old_dir_d = nd_o->dentry;
+ old_dir_i = old_dir_d->d_inode;
+
+ old_d = do_lookup(old_dir_d, nd_o->last.name);
+ if (!old_d) {
+ set_errno(ENOENT);
+ retval = -1;
+ goto out_old_path;
+ }
+
+ nd_n->intent = LOOKUP_CREATE;
+ error = path_lookup(new_path, LOOKUP_PARENT | LOOKUP_DIRECTORY, nd_n);
+ if (error) {
+ set_errno(-error);
+ retval = -1;
+ goto out_paths_and_src;
+ }
+ new_dir_d = nd_n->dentry;
+ new_dir_i = new_dir_d->d_inode;
+ /* TODO if new_dir == old_dir, we might be able to simplify things */
+
+ if (new_dir_i->i_sb != old_dir_i->i_sb) {
+ set_errno(EXDEV);
+ retval = -1;
+ goto out_paths_and_src;
+ }
+ /* TODO: check_perms is lousy, want to just say "writable" here */
+ if (check_perms(old_dir_i, S_IWUSR) || check_perms(new_dir_i, S_IWUSR)) {
+ set_errno(EPERM);
+ retval = -1;
+ goto out_paths_and_src;
+ }
+ /* TODO: if we're doing a rename that moves a directory, we need to make
+ * sure the new_path doesn't include the old_path. It's not as simple as
+ * just checking, since there could be a concurrent rename that breaks the
+ * check later. e.g. what if new_dir's parent is being moved into a child
+ * of old_dir?
+ *
+ * linux has a per-fs rename mutex for these scenarios, so only one can
+ * proceed at a time. i don't see another way to deal with it either.
+ * maybe something like flagging all dentries on the new_path with "do not
+ * move". */
+
+ /* TODO: this is all very racy. right after we do a new_d lookup, someone
+ * else could create or unlink new_d. need to lock here, or else push this
+ * into the sub-FS.
+ *
+ * For any locking scheme, we probably need to lock both the old and new
+ * dirs. To prevent deadlock, we need a total ordering of all inodes (or
+ * dentries, if we locking them instead). inode number or struct inode*
+ * will work for this. */
+ new_d = do_lookup(new_dir_d, nd_n->last.name);
+ if (new_d) {
+ if (new_d->d_inode == old_d->d_inode)
+ goto out_paths_and_refs; /* rename does nothing */
+ /* TODO: Here's a bunch of other racy checks we need to do, maybe in the
+ * sub-FS:
+ *
+ * if src is a dir, dst must be an empty dir if it exists (RACYx2)
+ * racing on dst being created and it getting new entries
+ * if src is a file, dst must be a file if it exists (RACY)
+ * racing on dst being created and still being a file
+ * racing on dst being unlinked and a new one being added
+ */
+ /* TODO: we should allow empty dirs */
+ if (S_ISDIR(new_d->d_inode->i_mode)) {
+ set_errno(EISDIR);
+ retval = -1;
+ goto out_paths_and_refs;
+ }
+ /* TODO: need this to be atomic with rename */
+ error = new_dir_i->i_op->unlink(new_dir_i, new_d);
+ if (error) {
+ set_errno(-error);
+ retval = -1;
+ goto out_paths_and_refs;
+ }
+ new_d->d_flags |= DENTRY_DYING;
+ /* TODO: racy with other lookups on new_d */
+ dcache_remove(new_d->d_sb, new_d);
+ new_d->d_inode->i_nlink--; /* TODO: race here, esp with a decref */
+ kref_put(&new_d->d_kref);
+ }
+ /* new_d is just a vessel for the name. somewhat lousy. */
+ new_d = get_dentry(new_dir_d->d_sb, new_dir_d, nd_n->last.name);
+
+ /* TODO: more races. need to remove old_d from the dcache, since we're
+ * about to change its parentage. could be readded concurrently. */
+ dcache_remove(old_dir_d->d_sb, old_d);
+ error = new_dir_i->i_op->rename(old_dir_i, old_d, new_dir_i, new_d);
+ if (error) {
+ /* TODO: oh crap, we already unlinked! now we're screwed, and violated
+ * our atomicity requirements. */
+ printk("[kernel] rename failed, you might have lost data\n");
+ set_errno(-error);
+ retval = -1;
+ goto out_paths_and_refs;
+ }
+
+ /* old_dir loses old_d, new_dir gains old_d, renamed to new_d. this is
+ * particularly cumbersome since there are two levels here: the FS has its
+ * info about where things are, and the VFS has its dentry tree. and it's
+ * all racy (TODO). */
+ dentry_set_name(old_d, new_d->d_name.name);
+ old_d->d_parent = new_d->d_parent;
+ if (S_ISDIR(old_d->d_inode->i_mode)) {
+ TAILQ_REMOVE(&old_dir_d->d_subdirs, old_d, d_subdirs_link);
+ old_dir_i->i_nlink--; /* TODO: racy, etc */
+ TAILQ_INSERT_TAIL(&new_dir_d->d_subdirs, old_d, d_subdirs_link);
+		new_dir_i->i_nlink++; /* TODO: racy, etc (new parent gains a link) */
+ }
+
+ /* and then the third level: dcache stuff. we could have old versions of
+ * old_d or negative versions of new_d sitting around. dcache_put should
+ * replace a potentially negative dentry for new_d (now called old_d) */
+ dcache_put(old_dir_d->d_sb, old_d);
+
+ set_acmtime(old_dir_i, VFS_MTIME);
+ set_acmtime(new_dir_i, VFS_MTIME);
+ set_acmtime(old_d->d_inode, VFS_CTIME);
+
+ /* fall-through */
+out_paths_and_refs:
+ kref_put(&new_d->d_kref);
+out_paths_and_src:
+ kref_put(&old_d->d_kref);
+out_paths:
+ path_release(nd_n);
+out_old_path:
+ path_release(nd_o);
+ return retval;
+}
+
+int do_truncate(struct inode *inode, off64_t len)
+{
+ off64_t old_len;
+
+ if (len < 0) {
+ set_errno(EINVAL);
+ return -1;
+ }
+ if (len > PiB) {
+ printk("[kernel] truncate for > petabyte, probably a bug\n");
+ /* continuing, not too concerned. could set EINVAL or EFBIG */
+ }
+ spin_lock(&inode->i_lock);
+ old_len = inode->i_size;
+ if (old_len == len) {
+ spin_unlock(&inode->i_lock);
+ return 0;
+ }
+ inode->i_size = len;
+ /* truncate can't block, since we're holding the spinlock. but it can rely
+ * on that lock being held */
+ inode->i_op->truncate(inode);
+ spin_unlock(&inode->i_lock);
+
+ if (old_len < len) {
+ pm_remove_contig(inode->i_mapping, old_len >> PGSHIFT,
+ (len >> PGSHIFT) - (old_len >> PGSHIFT));
+ }
+ set_acmtime(inode, VFS_MTIME);
+ return 0;
+}
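+
+/* Sketch of a hypothetical ftruncate-style caller: resolve the FD to a file,
+ * truncate through the inode, and drop the ref (assuming the proc's table is
+ * p->open_files):
+ *
+ *	struct file *file = get_file_from_fd(&p->open_files, fd);
+ *
+ *	if (file) {
+ *		do_truncate(file->f_dentry->d_inode, len);
+ *		kref_put(&file->f_kref);
+ *	}
+ */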
+
struct file *alloc_file(void)
{
struct file *file = kmem_cache_alloc(file_kcache, 0);
struct file *file;
int desired_mode;
inode = dentry->d_inode;
- /* Do the mode first, since we can still error out. f_mode stores how the
- * OS file is open, which can be more restrictive than the i_mode */
- switch (flags & (O_RDONLY | O_WRONLY | O_RDWR)) {
- case O_RDONLY:
- desired_mode = S_IRUSR;
- break;
- case O_WRONLY:
- desired_mode = S_IWUSR;
- break;
- case O_RDWR:
- desired_mode = S_IRUSR | S_IWUSR;
- break;
- default:
- goto error_access;
- }
+ /* f_mode stores how the OS file is open, which can be more restrictive than
+ * the i_mode */
+ desired_mode = omode_to_rwx(flags & O_ACCMODE);
if (check_perms(inode, desired_mode))
goto error_access;
file = alloc_file();
kmem_cache_free(file_kcache, file);
}
+ssize_t kread_file(struct file *file, void *buf, size_t sz)
+{
+ /* TODO: (KFOP) (VFS kernel read/writes need to be from a ktask) */
+ uintptr_t old_ret = switch_to_ktask();
+ off64_t dummy = 0;
+ ssize_t cpy_amt = file->f_op->read(file, buf, sz, &dummy);
+
+ switch_back_from_ktask(old_ret);
+ return cpy_amt;
+}
+
+/* Reads the contents of an entire file into a buffer, returning that buffer.
+ * On error, prints something useful and returns 0 */
+void *kread_whole_file(struct file *file)
+{
+ size_t size;
+ void *contents;
+ ssize_t cpy_amt;
+
+ size = file->f_dentry->d_inode->i_size;
+ contents = kmalloc(size, MEM_WAIT);
+ cpy_amt = kread_file(file, contents, size);
+ if (cpy_amt < 0) {
+ printk("Error %d reading file %s\n", get_errno(), file_name(file));
+ kfree(contents);
+ return 0;
+ }
+ if (cpy_amt != size) {
+ printk("Read %d, needed %d for file %s\n", cpy_amt, size,
+ file_name(file));
+ kfree(contents);
+ return 0;
+ }
+ return contents;
+}
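+
+/* Hypothetical usage: the caller owns the returned buffer and must kfree it;
+ * its length is the file's i_size at the time of the call:
+ *
+ *	void *blob = kread_whole_file(file);
+ *
+ *	if (blob) {
+ *		... consume i_size bytes at blob ...
+ *		kfree(blob);
+ *	}
+ */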
+
/* Process-related File management functions */
-/* Given any FD, get the appropriate file, 0 o/w */
-struct file *get_file_from_fd(struct files_struct *open_files, int file_desc)
+/* Given any FD, get the appropriate object, 0 o/w. Set vfs if you're looking
+ * for a file, o/w a chan. Set incref if you want a reference count (which is a
+ * 9ns thing, you can't use the pointer if you didn't incref). */
+void *lookup_fd(struct fd_table *fdt, int fd, bool incref, bool vfs)
{
- struct file *retval = 0;
- if (file_desc < 0)
+ void *retval = 0;
+ if (fd < 0)
return 0;
- spin_lock(&open_files->lock);
- if (open_files->closed) {
- spin_unlock(&open_files->lock);
+ spin_lock(&fdt->lock);
+ if (fdt->closed) {
+ spin_unlock(&fdt->lock);
return 0;
}
- if (file_desc < open_files->max_fdset) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
+ if (fd < fdt->max_fdset) {
+ if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, fd)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
- assert(file_desc < open_files->max_files);
- retval = open_files->fd[file_desc].fd_file;
- /* 9ns might be using this one, in which case file == 0 */
- if (retval)
- kref_get(&retval->f_kref, 1);
+ assert(fd < fdt->max_files);
+ if (vfs)
+ retval = fdt->fd[fd].fd_file;
+ else
+ retval = fdt->fd[fd].fd_chan;
+ /* retval could be 0 if we asked for the wrong one (e.g. it's a
+ * file, but we asked for a chan) */
+ if (retval && incref) {
+ if (vfs)
+ kref_get(&((struct file*)retval)->f_kref, 1);
+ else
+ chan_incref((struct chan*)retval);
+ }
}
}
- spin_unlock(&open_files->lock);
+ spin_unlock(&fdt->lock);
return retval;
}
+/* Given any FD, get the appropriate file, 0 o/w */
+struct file *get_file_from_fd(struct fd_table *open_files, int file_desc)
+{
+ return lookup_fd(open_files, file_desc, TRUE, TRUE);
+}
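+
+/* The 9ns side grabs a chan the same way; a sketch (assuming the proc's table
+ * is p->open_files), where the incref'd chan is released with cclose():
+ *
+ *	struct chan *c = lookup_fd(&p->open_files, fd, TRUE, FALSE);
+ *
+ *	if (c) {
+ *		... use the chan ...
+ *		cclose(c);
+ *	}
+ */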
+
/* Grow the vfs fd set */
-static int grow_fd_set(struct files_struct *open_files) {
+static int grow_fd_set(struct fd_table *open_files)
+{
int n;
struct file_desc *nfd, *ofd;
/* Grow the open_files->fd array in increments of NR_OPEN_FILES_DEFAULT */
n = open_files->max_files + NR_OPEN_FILES_DEFAULT;
if (n > NR_FILE_DESC_MAX)
- n = NR_FILE_DESC_MAX;
+ return -EMFILE;
nfd = kzmalloc(n * sizeof(struct file_desc), 0);
if (nfd == NULL)
- return -1;
+ return -ENOMEM;
/* Move the old array on top of the new one */
ofd = open_files->fd;
}
/* Free the vfs fd set if necessary */
-static void free_fd_set(struct files_struct *open_files) {
+static void free_fd_set(struct fd_table *open_files)
+{
+ void *free_me;
if (open_files->open_fds != (struct fd_set*)&open_files->open_fds_init) {
- kfree(open_files->open_fds);
assert(open_files->fd != open_files->fd_array);
- kfree(open_files->fd);
+ /* need to reset the pointers to the internal addrs, in case we take a
+ * look while debugging. 0 them out, since they have old data. our
+ * current versions should all be closed. */
+ memset(&open_files->open_fds_init, 0, sizeof(struct small_fd_set));
+ memset(&open_files->fd_array, 0, sizeof(open_files->fd_array));
+
+ free_me = open_files->open_fds;
+ open_files->open_fds = (struct fd_set*)&open_files->open_fds_init;
+ kfree(free_me);
+
+ free_me = open_files->fd;
+ open_files->fd = open_files->fd_array;
+ kfree(free_me);
}
}
-/* 9ns: puts back an FD from the VFS-FD-space. */
-int put_fd(struct files_struct *open_files, int file_desc)
+/* If FD is in the group, remove it, decref it, and return TRUE. */
+bool close_fd(struct fd_table *fdt, int fd)
{
- if (file_desc < 0) {
- warn("Negative FD!\n");
- return 0;
- }
- spin_lock(&open_files->lock);
- if (file_desc < open_files->max_fdset) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
+ struct file *file = 0;
+ struct chan *chan = 0;
+ struct fd_tap *tap = 0;
+ bool ret = FALSE;
+ if (fd < 0)
+ return FALSE;
+ spin_lock(&fdt->lock);
+ if (fd < fdt->max_fdset) {
+ if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, fd)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
- assert(file_desc < open_files->max_files);
- CLR_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
+ assert(fd < fdt->max_files);
+ file = fdt->fd[fd].fd_file;
+ chan = fdt->fd[fd].fd_chan;
+ tap = fdt->fd[fd].fd_tap;
+ fdt->fd[fd].fd_file = 0;
+ fdt->fd[fd].fd_chan = 0;
+ fdt->fd[fd].fd_tap = 0;
+ CLR_BITMASK_BIT(fdt->open_fds->fds_bits, fd);
+ if (fd < fdt->hint_min_fd)
+ fdt->hint_min_fd = fd;
+ ret = TRUE;
}
}
- spin_unlock(&open_files->lock);
- return 0;
+ spin_unlock(&fdt->lock);
+ /* Need to decref/cclose outside of the lock; they could sleep */
+ if (file)
+ kref_put(&file->f_kref);
+	else if (chan)
+		cclose(chan);
+ if (tap)
+ kref_put(&tap->kref);
+ return ret;
}
-/* Remove FD from the open files, if it was there, and return f. Currently,
- * this decref's f, so the return value is not consumable or even usable. This
- * hasn't been thought through yet. */
-struct file *put_file_from_fd(struct files_struct *open_files, int file_desc)
+void put_file_from_fd(struct fd_table *open_files, int file_desc)
{
- struct file *file = 0;
- if (file_desc < 0)
- return 0;
- spin_lock(&open_files->lock);
- if (file_desc < open_files->max_fdset) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
- /* while max_files and max_fdset might not line up, we should never
- * have a valid fdset higher than files */
- assert(file_desc < open_files->max_files);
- file = open_files->fd[file_desc].fd_file;
- open_files->fd[file_desc].fd_file = 0;
- assert(file); /* 9ns shouldn't call this put */
- kref_put(&file->f_kref);
- CLR_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
- }
- }
- spin_unlock(&open_files->lock);
- return file;
+ close_fd(open_files, file_desc);
}
-static int __get_fd(struct files_struct *open_files, int low_fd)
+static int __get_fd(struct fd_table *open_files, int low_fd, bool must_use_low)
{
int slot = -1;
+ int error;
+ bool update_hint = TRUE;
if ((low_fd < 0) || (low_fd > NR_FILE_DESC_MAX))
return -EINVAL;
if (open_files->closed)
return -EINVAL; /* won't matter, they are dying */
-
+ if (must_use_low && GET_BITMASK_BIT(open_files->open_fds->fds_bits, low_fd))
+ return -ENFILE;
+ if (low_fd > open_files->hint_min_fd)
+ update_hint = FALSE;
+ else
+ low_fd = open_files->hint_min_fd;
/* Loop until we have a valid slot (we grow the fd_array at the bottom of
* the loop if we haven't found a slot in the current array */
while (slot == -1) {
SET_BITMASK_BIT(open_files->open_fds->fds_bits, slot);
assert(slot < open_files->max_files &&
open_files->fd[slot].fd_file == 0);
- if (slot >= open_files->next_fd)
- open_files->next_fd = slot + 1;
+ /* We know slot >= hint, since we started with the hint */
+ if (update_hint)
+ open_files->hint_min_fd = slot + 1;
break;
}
if (slot == -1) {
- /* Expand the FD array and fd_set */
- if (grow_fd_set(open_files) == -1)
- return -ENOMEM;
- /* loop after growing */
+ if ((error = grow_fd_set(open_files)))
+ return error;
}
}
return slot;
}
-/* Gets and claims a free FD, used by 9ns. < 0 == error. */
-int get_fd(struct files_struct *open_files, int low_fd)
+/* Insert a file or chan (obj, chosen by vfs) into the fd group with fd_flags.
+ * If must_use_low, then we have to insert at FD = low_fd. o/w we start looking
+ * for empty slots at low_fd. */
+int insert_obj_fdt(struct fd_table *fdt, void *obj, int low_fd, int fd_flags,
+ bool must_use_low, bool vfs)
{
int slot;
- spin_lock(&open_files->lock);
- slot = __get_fd(open_files, low_fd);
- spin_unlock(&open_files->lock);
- return slot;
-}
-
-static int __claim_fd(struct files_struct *open_files, int file_desc)
-{
- if ((file_desc < 0) || (file_desc > NR_FILE_DESC_MAX))
- return -EINVAL;
- if (open_files->closed)
- return -EINVAL; /* won't matter, they are dying */
-
- /* Grow the open_files->fd_set until the file_desc can fit inside it */
- while(file_desc >= open_files->max_files) {
- grow_fd_set(open_files);
- cpu_relax();
- }
-
- /* If we haven't grown, this could be a problem, so check for it */
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc))
- return -ENFILE; /* Should never really happen. Here to catch bugs. */
-
- SET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
- assert(file_desc < open_files->max_files && open_files->fd[0].fd_file == 0);
- if (file_desc >= open_files->next_fd)
- open_files->next_fd = file_desc + 1;
- return 0;
-}
-
-/* Claims a specific FD when duping FDs. used by 9ns. < 0 == error. */
-int claim_fd(struct files_struct *open_files, int file_desc)
-{
- int ret;
- spin_lock(&open_files->lock);
- ret = __claim_fd(open_files, file_desc);
- spin_unlock(&open_files->lock);
- return ret;
-}
-
-/* Inserts the file in the files_struct, returning the corresponding new file
- * descriptor, or an error code. We start looking for open fds from low_fd. */
-int insert_file(struct files_struct *open_files, struct file *file, int low_fd)
-{
- int slot;
- spin_lock(&open_files->lock);
- slot = __get_fd(open_files, low_fd);
+ spin_lock(&fdt->lock);
+ slot = __get_fd(fdt, low_fd, must_use_low);
if (slot < 0) {
- spin_unlock(&open_files->lock);
+ spin_unlock(&fdt->lock);
return slot;
}
- assert(slot < open_files->max_files &&
- open_files->fd[slot].fd_file == 0);
- kref_get(&file->f_kref, 1);
- open_files->fd[slot].fd_file = file;
- open_files->fd[slot].fd_flags = 0;
- spin_unlock(&open_files->lock);
+ assert(slot < fdt->max_files &&
+ fdt->fd[slot].fd_file == 0);
+ if (vfs) {
+ kref_get(&((struct file*)obj)->f_kref, 1);
+ fdt->fd[slot].fd_file = obj;
+ fdt->fd[slot].fd_chan = 0;
+ } else {
+ chan_incref((struct chan*)obj);
+ fdt->fd[slot].fd_file = 0;
+ fdt->fd[slot].fd_chan = obj;
+ }
+ fdt->fd[slot].fd_flags = fd_flags;
+ spin_unlock(&fdt->lock);
return slot;
}
+/* Inserts the file in the fd_table, returning the corresponding new file
+ * descriptor, or an error code. We start looking for open fds from low_fd.
+ *
+ * Passing cloexec is a bit cheap, since we might want to expand it to support
+ * more FD options in the future. */
+int insert_file(struct fd_table *open_files, struct file *file, int low_fd,
+ bool must, bool cloexec)
+{
+ return insert_obj_fdt(open_files, file, low_fd, cloexec ? FD_CLOEXEC : 0,
+ must, TRUE);
+}
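+
+/* Sketch of a typical open-path caller, assuming the proc's table is
+ * p->open_files: returns the new FD (>= 0) on success, -error o/w.
+ *
+ *	int fd = insert_file(&p->open_files, file, 0, FALSE, flags & O_CLOEXEC);
+ *
+ *	if (fd < 0)
+ *		set_errno(-fd);
+ */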
+
/* Closes all open files. Mostly just a "put" for all files. If cloexec, it
- * will only close files that are opened with O_CLOEXEC. */
-void close_all_files(struct files_struct *open_files, bool cloexec)
+ * will only close the FDs with FD_CLOEXEC (opened with O_CLOEXEC or fcntld).
+ *
+ * Notes on concurrency:
+ * - Can't hold spinlocks while we call cclose, since it might sleep eventually.
+ * - We're called from proc_destroy, so we could have concurrent openers trying
+ * to add to the group (other syscalls), hence the "closed" flag.
+ * - dot and slash chans are dealt with in proc_free. It's difficult to close
+ * and zero those with concurrent syscalls, since those are a source of krefs.
+ * - Once we lock and set closed, no further additions can happen. To simplify
+ * our closes, we also allow multiple calls to this func (though that should
+ * never happen with the current code). */
+void close_fdt(struct fd_table *fdt, bool cloexec)
{
struct file *file;
- spin_lock(&open_files->lock);
- if (open_files->closed) {
- spin_unlock(&open_files->lock);
+ struct chan *chan;
+ struct file_desc *to_close;
+ int idx = 0;
+
+ to_close = kzmalloc(sizeof(struct file_desc) * fdt->max_files,
+ MEM_WAIT);
+ spin_lock(&fdt->lock);
+ if (fdt->closed) {
+ spin_unlock(&fdt->lock);
+ kfree(to_close);
return;
}
- for (int i = 0; i < open_files->max_fdset; i++) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, i)) {
+ for (int i = 0; i < fdt->max_fdset; i++) {
+ if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, i)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
- assert(i < open_files->max_files);
- file = open_files->fd[i].fd_file;
- /* no file == 9ns uses the FD. they will deal with it */
- if (!file)
- continue;
- if (cloexec && !(open_files->fd[i].fd_flags & O_CLOEXEC))
+ assert(i < fdt->max_files);
+ if (cloexec && !(fdt->fd[i].fd_flags & FD_CLOEXEC))
continue;
- /* Actually close the file */
- open_files->fd[i].fd_file = 0;
- assert(file);
- kref_put(&file->f_kref);
- CLR_BITMASK_BIT(open_files->open_fds->fds_bits, i);
+ file = fdt->fd[i].fd_file;
+ chan = fdt->fd[i].fd_chan;
+ to_close[idx].fd_tap = fdt->fd[i].fd_tap;
+ fdt->fd[i].fd_tap = 0;
+ if (file) {
+ fdt->fd[i].fd_file = 0;
+ to_close[idx++].fd_file = file;
+ } else {
+ fdt->fd[i].fd_chan = 0;
+ to_close[idx++].fd_chan = chan;
+ }
+ CLR_BITMASK_BIT(fdt->open_fds->fds_bits, i);
}
}
+ /* it's just a hint, we can build back up from being 0 */
+ fdt->hint_min_fd = 0;
if (!cloexec) {
- free_fd_set(open_files);
- open_files->closed = TRUE;
+ free_fd_set(fdt);
+ fdt->closed = TRUE;
+ }
+ spin_unlock(&fdt->lock);
+ /* We go through some hoops to close/decref outside the lock. Nice for not
+ * holding the lock for a while; critical in case the decref/cclose sleeps
+ * (it can) */
+ for (int i = 0; i < idx; i++) {
+ if (to_close[i].fd_file)
+ kref_put(&to_close[i].fd_file->f_kref);
+ else
+ cclose(to_close[i].fd_chan);
+ if (to_close[i].fd_tap)
+ kref_put(&to_close[i].fd_tap->kref);
}
- spin_unlock(&open_files->lock);
+ kfree(to_close);
}
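+
+/* e.g., process teardown closes everything, while exec closes only the
+ * FD_CLOEXEC FDs (a sketch, assuming the proc's table is p->open_files):
+ *
+ *	close_fdt(&p->open_files, FALSE);	(destroy: close all FDs)
+ *	close_fdt(&p->open_files, TRUE);	(exec: just the cloexec FDs)
+ */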
/* Inserts all of the files from src into dst, used by sys_fork(). */
-void clone_files(struct files_struct *src, struct files_struct *dst)
+void clone_fdt(struct fd_table *src, struct fd_table *dst)
{
struct file *file;
+ struct chan *chan;
+ int ret;
+
spin_lock(&src->lock);
if (src->closed) {
spin_unlock(&src->lock);
spin_unlock(&src->lock);
return;
}
+ while (src->max_files > dst->max_files) {
+ ret = grow_fd_set(dst);
+ if (ret < 0) {
+ set_error(-ret, "Failed to grow for a clone_fdt");
+ spin_unlock(&dst->lock);
+ spin_unlock(&src->lock);
+ return;
+ }
+ }
for (int i = 0; i < src->max_fdset; i++) {
if (GET_BITMASK_BIT(src->open_fds->fds_bits, i)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
assert(i < src->max_files);
file = src->fd[i].fd_file;
+ chan = src->fd[i].fd_chan;
assert(i < dst->max_files && dst->fd[i].fd_file == 0);
SET_BITMASK_BIT(dst->open_fds->fds_bits, i);
dst->fd[i].fd_file = file;
- /* no file means 9ns is using it, they clone separately */
+ dst->fd[i].fd_chan = chan;
if (file)
kref_get(&file->f_kref, 1);
- if (i >= dst->next_fd)
- dst->next_fd = i + 1;
+ else
+ chan_incref(chan);
}
}
+ dst->hint_min_fd = src->hint_min_fd;
spin_unlock(&dst->lock);
spin_unlock(&src->lock);
}
+static void __chpwd(struct fs_struct *fs_env, struct dentry *new_pwd)
+{
+ struct dentry *old_pwd;
+ kref_get(&new_pwd->d_kref, 1);
+ /* writer lock, make sure we replace pwd with ours. could also CAS.
+ * readers don't lock at all, so they need to either loop, or we need to
+ * delay releasing old_pwd til an RCU grace period. */
+ spin_lock(&fs_env->lock);
+ old_pwd = fs_env->pwd;
+ fs_env->pwd = new_pwd;
+ spin_unlock(&fs_env->lock);
+ kref_put(&old_pwd->d_kref);
+}
+
/* Change the working directory of the given fs env (one per process, at this
- * point). Returns 0 for success, -ERROR for whatever error. */
+ * point). Returns 0 for success, sets errno and returns -1 otherwise. */
int do_chdir(struct fs_struct *fs_env, char *path)
{
struct nameidata nd_r = {0}, *nd = &nd_r;
- int retval;
- retval = path_lookup(path, LOOKUP_DIRECTORY, nd);
- if (!retval) {
- /* nd->dentry is the place we want our PWD to be */
- kref_get(&nd->dentry->d_kref, 1);
- kref_put(&fs_env->pwd->d_kref);
- fs_env->pwd = nd->dentry;
+ int error;
+ error = path_lookup(path, LOOKUP_DIRECTORY, nd);
+ if (error) {
+ set_errno(-error);
+ path_release(nd);
+ return -1;
}
+ /* nd->dentry is the place we want our PWD to be */
+ __chpwd(fs_env, nd->dentry);
path_release(nd);
- return retval;
+ return 0;
+}
+
+int do_fchdir(struct fs_struct *fs_env, struct file *file)
+{
+	if (!S_ISDIR(file->f_dentry->d_inode->i_mode)) {
+ set_errno(ENOTDIR);
+ return -1;
+ }
+ __chpwd(fs_env, file->f_dentry);
+ return 0;
}
/* Returns a null-terminated string of up to length cwd_l containing the
kbuf[cwd_l - 2] = '/';
/* for each dentry in the path, all the way back to the root of fs_env, we
* grab the dentry name, push path_start back enough, and write in the name,
- * using /'s to terminate. We skip the root, since we don't want it's
+ * using /'s to terminate. We skip the root, since we don't want its
* actual name, just "/", which is set before each loop. */
path_start = kbuf + cwd_l - 2; /* the last byte written */
while (dentry != fs_env->root) {
return 0;
}
path_start -= link_len;
- strncpy(path_start, dentry->d_name.name, link_len);
+ memmove(path_start, dentry->d_name.name, link_len);
path_start--;
*path_start = '/';
- dentry = dentry->d_parent;
+ dentry = dentry->d_parent;
}
return path_start;
}
default:
warn("Look around you! Unknown filetype!");
}
- kref_put(&child_d->d_kref);
+ kref_put(&child_d->d_kref);
}
loop_next:
if (retval <= 0)