* Default implementations and global values for the VFS. */
#include <vfs.h> // keep this first
+#include <ros/errno.h>
#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>
#include <pmap.h>
#include <umem.h>
#include <smp.h>
+#include <ns.h>
+#include <fdtap.h>
struct sb_tailq super_blocks = TAILQ_HEAD_INITIALIZER(super_blocks);
spinlock_t super_blocks_lock = SPINLOCK_INITIALIZER;
struct kmem_cache *inode_kcache;
struct kmem_cache *file_kcache;
+enum {
+ VFS_MTIME,
+ VFS_CTIME,
+ VFS_ATIME,
+};
+
+/* mtime implies ctime implies atime. */
+static void set_acmtime(struct inode *inode, int which)
+{
+ struct timespec now = nsec2timespec(epoch_nsec());
+
+ switch (which) {
+ case VFS_MTIME:
+ inode->i_mtime.tv_sec = now.tv_sec;
+ inode->i_mtime.tv_nsec = now.tv_nsec;
+ /* fall through */
+ case VFS_CTIME:
+ inode->i_ctime.tv_sec = now.tv_sec;
+ inode->i_ctime.tv_nsec = now.tv_nsec;
+ /* fall through */
+ case VFS_ATIME:
+ inode->i_atime.tv_sec = now.tv_sec;
+ inode->i_atime.tv_nsec = now.tv_nsec;
+ }
+}
+
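+/* The fall-throughs above mean VFS_MTIME updates all three stamps, VFS_CTIME
+ * updates ctime and atime, and VFS_ATIME only atime.  e.g. (a sketch; 'inode'
+ * is any valid inode) a file write would do:
+ *	set_acmtime(inode, VFS_MTIME);
+ * while a read only bumps the atime:
+ *	set_acmtime(inode, VFS_ATIME);
+ */
+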
/* Mounts fs from dev_name at mnt_pt in namespace ns. There could be no mnt_pt,
* such as with the root of (the default) namespace. Not sure how it would work
* with multiple namespaces on the same FS yet. Note if you mount the same FS
struct fs_type *fs;
dentry_kcache = kmem_cache_create("dentry", sizeof(struct dentry),
- __alignof__(struct dentry), 0, 0, 0);
+ __alignof__(struct dentry), 0,
+ NULL, 0, 0, NULL);
inode_kcache = kmem_cache_create("inode", sizeof(struct inode),
- __alignof__(struct inode), 0, 0, 0);
+ __alignof__(struct inode), 0, NULL,
+ 0, 0, NULL);
file_kcache = kmem_cache_create("file", sizeof(struct file),
- __alignof__(struct file), 0, 0, 0);
+ __alignof__(struct file), 0, NULL, 0,
+ 0, NULL);
/* default NS never dies, +1 to exist */
kref_init(&default_ns.kref, fake_release, 1);
spinlock_init(&default_ns.lock);
return file->f_dentry->d_name.name;
}
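+
+/* Copies the len bytes of str into the buffer just below *pbuf, moving *pbuf
+ * back and shrinking *pbuflen; builds a string from right to left. */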
+static int prepend(char **pbuf, size_t *pbuflen, const char *str, size_t len)
+{
+ if (*pbuflen < len)
+ return -ENAMETOOLONG;
+ *pbuflen -= len;
+ *pbuf -= len;
+ memcpy(*pbuf, str, len);
+
+ return 0;
+}
+
+char *dentry_path(struct dentry *dentry, char *path, size_t max_size)
+{
+ size_t csize = max_size;
+ char *path_start = path + max_size, *base;
+
+ if (prepend(&path_start, &csize, "\0", 1) < 0 || csize < 1)
+ return NULL;
+ /* Handle the case that the passed dentry is the root. */
+ base = path_start - 1;
+ *base = '/';
+ while (!DENTRY_IS_ROOT(dentry)) {
+ if (prepend(&path_start, &csize, dentry->d_name.name,
+ dentry->d_name.len) < 0 ||
+ prepend(&path_start, &csize, "/", 1) < 0)
+ return NULL;
+ base = path_start;
+ dentry = dentry->d_parent;
+ }
+
+ return base;
+}
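+
+/* Sketch of a hypothetical caller: the string is built at the back of the
+ * passed buffer, so use the returned pointer, not the buffer itself; NULL
+ * means the path didn't fit in max_size bytes:
+ *
+ *	char buf[128];
+ *	char *path = dentry_path(dentry, buf, sizeof(buf));
+ *
+ *	if (path)
+ *		printk("Path: %s\n", path);
+ */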
+
/* Some issues with this, coupled closely to fs_lookup.
*
* Note the use of __dentry_free, instead of kref_put. In those cases, we don't
warn("OOM in do_lookup(), probably wasn't expected\n");
return 0;
}
- result = dcache_get(parent->d_sb, query);
+ result = dcache_get(parent->d_sb, query);
if (result) {
__dentry_free(query);
return result;
/* TODO: if the following are done by us, how do we know the i_ino?
* also need to handle inodes that are already read in! For now, we're
- * going to have the FS handle it in it's lookup() method:
+ * going to have the FS handle it in its lookup() method:
* - get a new inode
* - read in the inode
* - put in the inode cache */
nd->depth++;
symname = nd->dentry->d_inode->i_op->readlink(nd->dentry);
/* We need to pin in nd->dentry (the dentry of the symlink), since we need
- * it's symname's storage to stay in memory throughout the upcoming
+ * its symname's storage to stay in memory throughout the upcoming
* link_path_walk(). The last_sym gets decreffed when we path_release() or
* follow another symlink. */
if (nd->last_sym)
if (!current)
nd->dentry = default_ns.root->mnt_root;
else
- nd->dentry = current->fs_env.root;
+ nd->dentry = current->fs_env.root;
nd->mnt = nd->dentry->d_sb->s_mount;
kref_get(&nd->mnt->mnt_kref, 1);
kref_get(&nd->dentry->d_kref, 1);
return FALSE;
}
-/* Simple helper to set nd to track it's last name to be Name. Also be careful
+/* Simple helper to set nd to track its last name to be Name. Also be careful
 * with the storage of name. Don't use an nd's name past the lifetime of the
* string used in the path_lookup()/link_path_walk/whatever. Consider replacing
* parts of this with a qstr builder. Note this uses the dentry's d_op, which
}
/* Given path, return the inode for the final dentry. The ND should be
- * initialized for the first call - specifically, we need the intent.
+ * initialized for the first call - specifically, we need the intent.
* LOOKUP_PARENT and friends go in the flags var, which is not the intent.
*
* If path_lookup wants a PARENT, but hits the top of the FS (root or
if (!current)
nd->dentry = default_ns.root->mnt_root;
else
- nd->dentry = current->fs_env.root;
+ nd->dentry = current->fs_env.root;
} else { /* relative lookup */
assert(current);
/* Don't need to lock on the fs_env since we're reading one item */
- nd->dentry = current->fs_env.pwd;
+ nd->dentry = current->fs_env.pwd;
}
nd->mnt = nd->dentry->d_sb->s_mount;
/* Whenever references get put in the nd, incref them. Whenever they are
kref_get(&nd->dentry->d_kref, 1);
nd->flags = flags;
nd->depth = 0; /* used in symlink following */
- retval = link_path_walk(path, nd);
+ retval = link_path_walk(path, nd);
/* make sure our PARENT lookup worked */
if (!retval && (flags & LOOKUP_PARENT))
assert(nd->last.name);
retval = path_lookup(path, LOOKUP_DIRECTORY, nd);
if (retval)
goto out;
- /* taking the namespace of the vfsmount of path */
+ /* taking the namespace of the vfsmount of path */
if (!__mount_fs(fs, dev_name, nd->dentry, flags, nd->mnt->mnt_namespace))
retval = -EINVAL;
out:
/* Helper to alloc and initialize a generic superblock. This handles all the
* VFS related things, like lists. Each FS will need to handle its own things
- * in it's *_get_sb(), usually involving reading off the disc. */
+ * in its *_get_sb(), usually involving reading off the disc. */
struct super_block *get_sb(void)
{
struct super_block *sb = kmalloc(sizeof(struct super_block), 0);
/* Final stages of initializing a super block, including creating and linking
* the root dentry, root inode, vmnt, and sb. The d_op and root_ino are
- * FS-specific, but otherwise it's FS-independent, tricky, and not worth having
+ * FS-specific, but otherwise it's FS-independent, tricky, and not worth having
* around multiple times.
*
* Not the world's best interface, so it's subject to change, esp since we're
size_t name_len = strnlen(name, MAX_FILENAME_SZ); /* not including \0! */
char *l_name = 0;
if (name_len < DNAME_INLINE_LEN) {
- strncpy(dentry->d_iname, name, name_len);
- dentry->d_iname[name_len] = '\0';
+ strlcpy(dentry->d_iname, name, name_len + 1);
qstr_builder(dentry, 0);
} else {
l_name = kmalloc(name_len + 1, 0);
assert(l_name);
- strncpy(l_name, name, name_len);
- l_name[name_len] = '\0';
+ strlcpy(l_name, name, name_len + 1);
qstr_builder(dentry, l_name);
}
}
* note we don't pass this an nd, like Linux does... */
static struct inode *create_inode(struct dentry *dentry, int mode)
{
- uint64_t now = epoch_seconds();
/* note it is the i_ino that uniquely identifies a file in the specific
* filesystem. there's a diff between creating an inode (even for an in-use
* ino) and then filling it in, and vs creating a brand new one.
inode->i_nlink = 1;
inode->i_size = 0;
inode->i_blocks = 0;
- inode->i_atime.tv_sec = now;
- inode->i_ctime.tv_sec = now;
- inode->i_mtime.tv_sec = now;
- inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
+ set_acmtime(inode, VFS_MTIME);
inode->i_bdev = inode->i_sb->s_bdev;
/* when we have notions of users, do something here: */
inode->i_uid = 0;
if (!new_file)
return -1;
dir->i_op->create(dir, dentry, mode, 0);
+ set_acmtime(dir, VFS_MTIME);
icache_put(new_file->i_sb, new_file);
kref_put(&new_file->i_kref);
return 0;
assert(parent && parent == TAILQ_LAST(&dir->i_dentry, dentry_tailq));
/* parent dentry tracks dentry as a subdir, weak reference */
TAILQ_INSERT_TAIL(&parent->d_subdirs, dentry, d_subdirs_link);
+ set_acmtime(dir, VFS_MTIME);
icache_put(new_dir->i_sb, new_dir);
kref_put(&new_dir->i_kref);
return 0;
if (!new_sym)
return -1;
dir->i_op->symlink(dir, dentry, symname);
+ set_acmtime(dir, VFS_MTIME);
icache_put(new_sym->i_sb, new_sym);
kref_put(&new_sym->i_kref);
return 0;
kstat->st_size = inode->i_size;
kstat->st_blksize = inode->i_blksize;
kstat->st_blocks = inode->i_blocks;
- kstat->st_atime = inode->i_atime;
- kstat->st_mtime = inode->i_mtime;
- kstat->st_ctime = inode->i_ctime;
+ kstat->st_atim = inode->i_atime;
+ kstat->st_mtim = inode->i_mtime;
+ kstat->st_ctim = inode->i_ctime;
}
void print_kstat(struct kstat *kstat)
printk("\tst_size : %p\n", kstat->st_size);
printk("\tst_blksize: %p\n", kstat->st_blksize);
printk("\tst_blocks : %p\n", kstat->st_blocks);
- printk("\tst_atime : %p\n", kstat->st_atime);
- printk("\tst_mtime : %p\n", kstat->st_mtime);
- printk("\tst_ctime : %p\n", kstat->st_ctime);
+ printk("\tst_atime : %p\n", kstat->st_atim);
+ printk("\tst_mtime : %p\n", kstat->st_mtim);
+ printk("\tst_ctime : %p\n", kstat->st_ctim);
}
/* Inode Cache management. In general, search on the ino, get a refcnt'd value
/* Consider pushing some error checking higher in the VFS */
if (!count)
return 0;
+ if (!(file->f_flags & O_READ)) {
+ set_errno(EBADF);
+ return 0;
+ }
if (orig_off >= file->f_dentry->d_inode->i_size)
return 0; /* EOF */
/* Make sure we don't go past the end of the file */
error = pm_load_page(file->f_mapping, i, &page);
assert(!error); /* TODO: handle ENOMEM and friends */
copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
- /* TODO: (UMEM) think about this. if it's a user buffer, we're relying
- * on current to detect whose it is (which should work for async calls).
- * Also, need to propagate errors properly... Probably should do a
- * user_mem_check, then free, and also to make a distinction between
- * when the kernel wants a read/write (TODO: KFOP) */
- if (current) {
+ /* TODO: (KFOP) Probably shouldn't do this. Either memcpy directly, or
+ * split out the is_user_r(w)addr from copy_{to,from}_user() */
+ if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
memcpy_to_user(current, buf, page2kva(page) + page_off, copy_amt);
- } else {
+ else
memcpy(buf, page2kva(page) + page_off, copy_amt);
- }
buf += copy_amt;
page_off = 0;
pm_put_page(page); /* it's still in the cache, we just don't need it */
 * safe. But at least it'll be a value that one of the concurrent ops could
 * have produced (compared to *offset_changed_concurrently += count). */
*offset = orig_off + count;
+ set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
return count;
}
/* Consider pushing some error checking higher in the VFS */
if (!count)
return 0;
+ if (!(file->f_flags & O_WRITE)) {
+ set_errno(EBADF);
+ return 0;
+ }
if (file->f_flags & O_APPEND) {
spin_lock(&file->f_dentry->d_inode->i_lock);
orig_off = file->f_dentry->d_inode->i_size;
error = pm_load_page(file->f_mapping, i, &page);
assert(!error); /* TODO: handle ENOMEM and friends */
copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
- /* TODO: (UMEM) (KFOP) think about this. if it's a user buffer, we're
- * relying on current to detect whose it is (which should work for async
- * calls). */
- if (current) {
+ /* TODO: (UMEM) (KFOP) think about this. */
+ if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
memcpy_from_user(current, page2kva(page) + page_off, buf, copy_amt);
- } else {
+ else
memcpy(page2kva(page) + page_off, buf, copy_amt);
- }
buf += copy_amt;
page_off = 0;
atomic_or(&page->pg_flags, PG_DIRTY);
}
assert(buf == buf_end);
*offset = orig_off + count;
+ set_acmtime(file->f_dentry->d_inode, VFS_MTIME);
return count;
}
}
if (!count)
return 0;
+ if (!(file->f_flags & O_READ)) {
+ set_errno(EBADF);
+ return 0;
+ }
/* start readdir from where it left off: */
dirent->d_off = *offset;
for ( ;
}
/* Slight info exposure: could be extra crap after the name in the
* dirent (like the name of a deleted file) */
- if (current) {
+ if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
memcpy_to_user(current, u_buf, dirent, sizeof(struct dirent));
- } else {
+ else
memcpy(u_buf, dirent, sizeof(struct dirent));
- }
amt_copied += sizeof(struct dirent);
/* 0 signals end of directory */
if (retval == 0)
/* Next time read is called, we pick up where we left off */
*offset = dirent->d_off; /* UMEM */
/* important to tell them how much they got. they often keep going til they
- * get 0 back (in the case of ls). it's also how much has been read, but it
+ * get 0 back (in the case of ls). It's also how much has been read, but it
* isn't how much the f_pos has moved (which is opaque to the VFS). */
+ set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
return amt_copied;
}
nd->intent = LOOKUP_OPEN;
error = path_lookup(path, LOOKUP_FOLLOW, nd);
if (!error) {
- /* If this is a directory, make sure we are opening with O_RDONLY.
- * Unfortunately we can't just check for O_RDONLY directly because its
- * value is 0x0. We instead have to make sure it's not O_WRONLY and
- * not O_RDWR explicitly. */
- if (S_ISDIR(nd->dentry->d_inode->i_mode) &&
- ((flags & O_WRONLY) || (flags & O_RDWR))) {
+ if (S_ISDIR(nd->dentry->d_inode->i_mode) && (flags & O_WRITE)) {
set_errno(EISDIR);
goto out_path_only;
}
}
/* So it didn't already exist, release the path from the previous lookup,
* and then we try to create it. */
- path_release(nd);
+ path_release(nd);
/* get the parent, following links. this means you get the parent of the
 * final link (which may not be in 'path' in the first place). */
nd->intent = LOOKUP_CREATE;
goto out_path_only;
}
/* see if the target is there (shouldn't be), and handle accordingly */
- file_d = do_lookup(nd->dentry, nd->last.name);
+ file_d = do_lookup(nd->dentry, nd->last.name);
if (!file_d) {
if (!(flags & O_CREAT)) {
warn("Extremely unlikely race, probably a bug");
goto out_path_only;
}
/* see if the target is already there, handle accordingly */
- sym_d = do_lookup(nd->dentry, nd->last.name);
+ sym_d = do_lookup(nd->dentry, nd->last.name);
if (sym_d) {
set_errno(EEXIST);
goto out_sym_d;
parent_i = nd->dentry->d_inode;
if (create_symlink(parent_i, sym_d, symname, mode))
goto out_sym_d;
+ set_acmtime(parent_i, VFS_MTIME);
dcache_put(sym_d->d_sb, sym_d);
retval = 0; /* Note the fall through to the exit paths */
out_sym_d:
}
parent_dir = nd->dentry->d_inode;
/* see if the new target is already there, handle accordingly */
- link_d = do_lookup(nd->dentry, nd->last.name);
+ link_d = do_lookup(nd->dentry, nd->last.name);
if (link_d) {
set_errno(EEXIST);
goto out_link_d;
set_errno(-error);
goto out_both_ds;
}
+ set_acmtime(parent_dir, VFS_MTIME);
/* Finally stitch it up */
inode = old_d->d_inode;
kref_get(&inode->i_kref, 1);
}
parent_dir = nd->dentry->d_inode;
/* make sure the target is there */
- dentry = do_lookup(nd->dentry, nd->last.name);
+ dentry = do_lookup(nd->dentry, nd->last.name);
if (!dentry) {
set_errno(ENOENT);
goto out_path_only;
set_errno(-error);
goto out_dentry;
}
+ set_acmtime(parent_dir, VFS_MTIME);
/* Now that our parent doesn't track us, we need to make sure we aren't
* findable via the dentry cache. DYING, so we will be freed in
* dentry_release() */
int retval = 0;
nd->intent = LOOKUP_ACCESS;
retval = path_lookup(path, 0, nd);
- path_release(nd);
+ path_release(nd);
return retval;
}
else
#endif
file->f_dentry->d_inode->i_mode = (mode & S_PMASK) | old_mode_ftype;
+ set_acmtime(file->f_dentry->d_inode, VFS_CTIME);
return 0;
}
int error;
int retval = -1;
+ /* The dir might exist and might be /, so we can't look for the parent */
+ nd->intent = LOOKUP_OPEN;
+ error = path_lookup(path, LOOKUP_FOLLOW, nd);
+ path_release(nd);
+ if (!error) {
+ set_errno(EEXIST);
+ return -1;
+ }
nd->intent = LOOKUP_CREATE;
/* get the parent, but don't follow links */
error = path_lookup(path, LOOKUP_PARENT, nd);
set_errno(-error);
goto out_path_only;
}
- /* see if the target is already there, handle accordingly */
- dentry = do_lookup(nd->dentry, nd->last.name);
- if (dentry) {
- set_errno(EEXIST);
- goto out_dentry;
- }
/* Doesn't already exist, let's try to make it: */
dentry = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
if (!dentry)
parent_i = nd->dentry->d_inode;
if (create_dir(parent_i, dentry, mode))
goto out_dentry;
+ set_acmtime(parent_i, VFS_MTIME);
dcache_put(dentry->d_sb, dentry);
retval = 0; /* Note the fall through to the exit paths */
out_dentry:
goto out_path_only;
}
/* make sure the target is already there, handle accordingly */
- dentry = do_lookup(nd->dentry, nd->last.name);
+ dentry = do_lookup(nd->dentry, nd->last.name);
if (!dentry) {
set_errno(ENOENT);
goto out_path_only;
set_errno(-error);
goto out_dentry;
}
+ set_acmtime(parent_i, VFS_MTIME);
/* Now that our parent doesn't track us, we need to make sure we aren't
* findable via the dentry cache. DYING, so we will be freed in
* dentry_release() */
if (amt_copied)
__cv_broadcast(&pii->p_cv);
cv_unlock(&pii->p_cv);
+ set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
return amt_copied;
}
if (amt_copied)
__cv_broadcast(&pii->p_cv);
cv_unlock(&pii->p_cv);
+ set_acmtime(file->f_dentry->d_inode, VFS_MTIME);
return amt_copied;
}
/* Actually build the pipe. We're using one page, hanging off the
* pipe_inode_info struct. When we release the inode, we free the pipe
* memory too */
- pipe_i->i_pipe = kmalloc(sizeof(struct pipe_inode_info), KMALLOC_WAIT);
+ pipe_i->i_pipe = kmalloc(sizeof(struct pipe_inode_info), MEM_WAIT);
pii = pipe_i->i_pipe;
if (!pii) {
set_errno(ENOMEM);
struct dentry *old_d, *new_d, *unlink_d;
int error;
int retval = 0;
- uint64_t now;
nd_o->intent = LOOKUP_ACCESS; /* maybe, might need another type */
goto out_paths_and_src;
}
/* TODO: if we're doing a rename that moves a directory, we need to make
- * sure the new_path doesn't include the old_path. it's not as simple as
+ * sure the new_path doesn't include the old_path. It's not as simple as
* just checking, since there could be a concurrent rename that breaks the
* check later. e.g. what if new_dir's parent is being moved into a child
* of old_dir?
* replace a potentially negative dentry for new_d (now called old_d) */
dcache_put(old_dir_d->d_sb, old_d);
- /* TODO could have a helper for this, but it's going away soon */
- now = epoch_seconds();
- old_dir_i->i_ctime.tv_sec = now;
- old_dir_i->i_mtime.tv_sec = now;
- old_dir_i->i_ctime.tv_nsec = 0;
- old_dir_i->i_mtime.tv_nsec = 0;
- new_dir_i->i_ctime.tv_sec = now;
- new_dir_i->i_mtime.tv_sec = now;
- new_dir_i->i_ctime.tv_nsec = 0;
- new_dir_i->i_mtime.tv_nsec = 0;
+ set_acmtime(old_dir_i, VFS_MTIME);
+ set_acmtime(new_dir_i, VFS_MTIME);
+ set_acmtime(old_d->d_inode, VFS_CTIME);
/* fall-through */
out_paths_and_refs:
int do_truncate(struct inode *inode, off64_t len)
{
off64_t old_len;
- uint64_t now;
+
if (len < 0) {
set_errno(EINVAL);
return -1;
pm_remove_contig(inode->i_mapping, old_len >> PGSHIFT,
(len >> PGSHIFT) - (old_len >> PGSHIFT));
}
- now = epoch_seconds();
- inode->i_ctime.tv_sec = now;
- inode->i_mtime.tv_sec = now;
- inode->i_ctime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
+ set_acmtime(inode, VFS_MTIME);
return 0;
}
struct file *file;
int desired_mode;
inode = dentry->d_inode;
- /* Do the mode first, since we can still error out. f_mode stores how the
- * OS file is open, which can be more restrictive than the i_mode */
- switch (flags & (O_RDONLY | O_WRONLY | O_RDWR)) {
- case O_RDONLY:
- desired_mode = S_IRUSR;
- break;
- case O_WRONLY:
- desired_mode = S_IWUSR;
- break;
- case O_RDWR:
- desired_mode = S_IRUSR | S_IWUSR;
- break;
- default:
- goto error_access;
- }
+ /* f_mode stores how the OS file is open, which can be more restrictive than
+ * the i_mode */
+ desired_mode = omode_to_rwx(flags & O_ACCMODE);
if (check_perms(inode, desired_mode))
goto error_access;
file = alloc_file();
kmem_cache_free(file_kcache, file);
}
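+
+/* Reads sz bytes from the start of the file into buf, switching to a ktask so
+ * the underlying f_op->read treats buf as a kernel address.  Returns whatever
+ * f_op->read returned. */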
+ssize_t kread_file(struct file *file, void *buf, size_t sz)
+{
+ /* TODO: (KFOP) (VFS kernel read/writes need to be from a ktask) */
+ uintptr_t old_ret = switch_to_ktask();
+ off64_t dummy = 0;
+ ssize_t cpy_amt = file->f_op->read(file, buf, sz, &dummy);
+
+ switch_back_from_ktask(old_ret);
+ return cpy_amt;
+}
+
+/* Reads the contents of an entire file into a buffer, returning that buffer.
+ * On error, prints something useful and returns 0 */
+void *kread_whole_file(struct file *file)
+{
+ size_t size;
+ void *contents;
+ ssize_t cpy_amt;
+
+ size = file->f_dentry->d_inode->i_size;
+ contents = kmalloc(size, MEM_WAIT);
+ cpy_amt = kread_file(file, contents, size);
+ if (cpy_amt < 0) {
+ printk("Error %d reading file %s\n", get_errno(), file_name(file));
+ kfree(contents);
+ return 0;
+ }
+ if (cpy_amt != size) {
+ printk("Read %d, needed %d for file %s\n", cpy_amt, size,
+ file_name(file));
+ kfree(contents);
+ return 0;
+ }
+ return contents;
+}
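+
+/* Hypothetical usage sketch: the caller owns the returned buffer, which holds
+ * i_size bytes, and must kfree() it:
+ *
+ *	void *contents = kread_whole_file(file);
+ *
+ *	if (contents) {
+ *		... use file->f_dentry->d_inode->i_size bytes at contents ...
+ *		kfree(contents);
+ *	}
+ */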
+
/* Process-related File management functions */
-/* Given any FD, get the appropriate file, 0 o/w */
-struct file *get_file_from_fd(struct files_struct *open_files, int file_desc)
+/* Given any FD, get the appropriate object, 0 o/w. Set vfs if you're looking
+ * for a file, o/w a chan. Set incref if you want a reference count (which is a
+ * 9ns thing, you can't use the pointer if you didn't incref). */
+void *lookup_fd(struct fd_table *fdt, int fd, bool incref, bool vfs)
{
- struct file *retval = 0;
- if (file_desc < 0)
+ void *retval = 0;
+ if (fd < 0)
return 0;
- spin_lock(&open_files->lock);
- if (open_files->closed) {
- spin_unlock(&open_files->lock);
+ spin_lock(&fdt->lock);
+ if (fdt->closed) {
+ spin_unlock(&fdt->lock);
return 0;
}
- if (file_desc < open_files->max_fdset) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
+ if (fd < fdt->max_fdset) {
+ if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, fd)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
- assert(file_desc < open_files->max_files);
- retval = open_files->fd[file_desc].fd_file;
- /* 9ns might be using this one, in which case file == 0 */
- if (retval)
- kref_get(&retval->f_kref, 1);
+ assert(fd < fdt->max_files);
+ if (vfs)
+ retval = fdt->fd[fd].fd_file;
+ else
+ retval = fdt->fd[fd].fd_chan;
+ /* retval could be 0 if we asked for the wrong one (e.g. it's a
+ * file, but we asked for a chan) */
+ if (retval && incref) {
+ if (vfs)
+ kref_get(&((struct file*)retval)->f_kref, 1);
+ else
+ chan_incref((struct chan*)retval);
+ }
}
}
- spin_unlock(&open_files->lock);
+ spin_unlock(&fdt->lock);
return retval;
}
+/* Given any FD, get the appropriate file, 0 o/w */
+struct file *get_file_from_fd(struct fd_table *open_files, int file_desc)
+{
+ return lookup_fd(open_files, file_desc, TRUE, TRUE);
+}
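+
+/* e.g. a hypothetical 9ns-side caller that wants a counted chan ref instead:
+ *	struct chan *c = lookup_fd(&p->open_files, fd, TRUE, FALSE);
+ * followed by cclose(c) when done. */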
+
/* Grow the vfs fd set */
-static int grow_fd_set(struct files_struct *open_files) {
+static int grow_fd_set(struct fd_table *open_files)
+{
int n;
struct file_desc *nfd, *ofd;
/* Grow the open_files->fd array in increments of NR_OPEN_FILES_DEFAULT */
n = open_files->max_files + NR_OPEN_FILES_DEFAULT;
if (n > NR_FILE_DESC_MAX)
- n = NR_FILE_DESC_MAX;
+ return -EMFILE;
nfd = kzmalloc(n * sizeof(struct file_desc), 0);
if (nfd == NULL)
- return -1;
+ return -ENOMEM;
/* Move the old array on top of the new one */
ofd = open_files->fd;
}
/* Free the vfs fd set if necessary */
-static void free_fd_set(struct files_struct *open_files) {
+static void free_fd_set(struct fd_table *open_files)
+{
+ void *free_me;
if (open_files->open_fds != (struct fd_set*)&open_files->open_fds_init) {
- kfree(open_files->open_fds);
assert(open_files->fd != open_files->fd_array);
- kfree(open_files->fd);
+ /* need to reset the pointers to the internal addrs, in case we take a
+ * look while debugging. 0 them out, since they have old data. our
+ * current versions should all be closed. */
+ memset(&open_files->open_fds_init, 0, sizeof(struct small_fd_set));
+ memset(&open_files->fd_array, 0, sizeof(open_files->fd_array));
+
+ free_me = open_files->open_fds;
+ open_files->open_fds = (struct fd_set*)&open_files->open_fds_init;
+ kfree(free_me);
+
+ free_me = open_files->fd;
+ open_files->fd = open_files->fd_array;
+ kfree(free_me);
}
}
-/* 9ns: puts back an FD from the VFS-FD-space. */
-int put_fd(struct files_struct *open_files, int file_desc)
+/* If FD is in the group, remove it, decref it, and return TRUE. */
+bool close_fd(struct fd_table *fdt, int fd)
{
- if (file_desc < 0) {
- warn("Negative FD!\n");
- return 0;
- }
- spin_lock(&open_files->lock);
- if (file_desc < open_files->max_fdset) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
+ struct file *file = 0;
+ struct chan *chan = 0;
+ struct fd_tap *tap = 0;
+ bool ret = FALSE;
+ if (fd < 0)
+ return FALSE;
+ spin_lock(&fdt->lock);
+ if (fd < fdt->max_fdset) {
+ if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, fd)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
- assert(file_desc < open_files->max_files);
- CLR_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
+ assert(fd < fdt->max_files);
+ file = fdt->fd[fd].fd_file;
+ chan = fdt->fd[fd].fd_chan;
+ tap = fdt->fd[fd].fd_tap;
+ fdt->fd[fd].fd_file = 0;
+ fdt->fd[fd].fd_chan = 0;
+ fdt->fd[fd].fd_tap = 0;
+ CLR_BITMASK_BIT(fdt->open_fds->fds_bits, fd);
+ if (fd < fdt->hint_min_fd)
+ fdt->hint_min_fd = fd;
+ ret = TRUE;
}
}
- spin_unlock(&open_files->lock);
- return 0;
+ spin_unlock(&fdt->lock);
+ /* Need to decref/cclose outside of the lock; they could sleep */
+ if (file)
+ kref_put(&file->f_kref);
+ else
+ cclose(chan);
+ if (tap)
+ kref_put(&tap->kref);
+ return ret;
}
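+
+/* Hypothetical syscall-side usage:
+ *	if (!close_fd(&p->open_files, fd))
+ *		set_errno(EBADF);
+ */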
-/* Remove FD from the open files, if it was there, and return f. Currently,
- * this decref's f, so the return value is not consumable or even usable. This
- * hasn't been thought through yet. */
-struct file *put_file_from_fd(struct files_struct *open_files, int file_desc)
+void put_file_from_fd(struct fd_table *open_files, int file_desc)
{
- struct file *file = 0;
- if (file_desc < 0)
- return 0;
- spin_lock(&open_files->lock);
- if (file_desc < open_files->max_fdset) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
- /* while max_files and max_fdset might not line up, we should never
- * have a valid fdset higher than files */
- assert(file_desc < open_files->max_files);
- file = open_files->fd[file_desc].fd_file;
- open_files->fd[file_desc].fd_file = 0;
- assert(file); /* 9ns shouldn't call this put */
- kref_put(&file->f_kref);
- CLR_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
- }
- }
- spin_unlock(&open_files->lock);
- return file;
+ close_fd(open_files, file_desc);
}
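+
+/* Finds a free slot at or above low_fd (exactly low_fd if must_use_low),
+ * claims it in the open_fds bitmask, and returns the slot number or -error.
+ * The caller must hold the fdt lock. */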
-static int __get_fd(struct files_struct *open_files, int low_fd)
+static int __get_fd(struct fd_table *open_files, int low_fd, bool must_use_low)
{
int slot = -1;
+ int error;
+ bool update_hint = TRUE;
if ((low_fd < 0) || (low_fd > NR_FILE_DESC_MAX))
return -EINVAL;
if (open_files->closed)
return -EINVAL; /* won't matter, they are dying */
-
+ if (must_use_low && GET_BITMASK_BIT(open_files->open_fds->fds_bits, low_fd))
+ return -ENFILE;
+ if (low_fd > open_files->hint_min_fd)
+ update_hint = FALSE;
+ else
+ low_fd = open_files->hint_min_fd;
/* Loop until we have a valid slot (we grow the fd_array at the bottom of
* the loop if we haven't found a slot in the current array */
while (slot == -1) {
SET_BITMASK_BIT(open_files->open_fds->fds_bits, slot);
assert(slot < open_files->max_files &&
open_files->fd[slot].fd_file == 0);
- if (slot >= open_files->next_fd)
- open_files->next_fd = slot + 1;
+ /* We know slot >= hint, since we started with the hint */
+ if (update_hint)
+ open_files->hint_min_fd = slot + 1;
break;
}
if (slot == -1) {
- /* Expand the FD array and fd_set */
- if (grow_fd_set(open_files) == -1)
- return -ENOMEM;
- /* loop after growing */
+ if ((error = grow_fd_set(open_files)))
+ return error;
}
}
return slot;
}
-/* Gets and claims a free FD, used by 9ns. < 0 == error. */
-int get_fd(struct files_struct *open_files, int low_fd)
+/* Insert a file or chan (obj, chosen by vfs) into the fd group with fd_flags.
+ * If must_use_low, then we have to insert at FD = low_fd. o/w we start looking
+ * for empty slots at low_fd. */
+int insert_obj_fdt(struct fd_table *fdt, void *obj, int low_fd, int fd_flags,
+ bool must_use_low, bool vfs)
{
int slot;
- spin_lock(&open_files->lock);
- slot = __get_fd(open_files, low_fd);
- spin_unlock(&open_files->lock);
- return slot;
-}
-
-static int __claim_fd(struct files_struct *open_files, int file_desc)
-{
- if ((file_desc < 0) || (file_desc > NR_FILE_DESC_MAX))
- return -EINVAL;
- if (open_files->closed)
- return -EINVAL; /* won't matter, they are dying */
-
- /* Grow the open_files->fd_set until the file_desc can fit inside it */
- while(file_desc >= open_files->max_files) {
- grow_fd_set(open_files);
- cpu_relax();
- }
-
- /* If we haven't grown, this could be a problem, so check for it */
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc))
- return -ENFILE; /* Should never really happen. Here to catch bugs. */
-
- SET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
- assert(file_desc < open_files->max_files &&
- open_files->fd[file_desc].fd_file == 0);
- if (file_desc >= open_files->next_fd)
- open_files->next_fd = file_desc + 1;
- return 0;
-}
-
-/* Claims a specific FD when duping FDs. used by 9ns. < 0 == error. */
-int claim_fd(struct files_struct *open_files, int file_desc)
-{
- int ret;
- spin_lock(&open_files->lock);
- ret = __claim_fd(open_files, file_desc);
- spin_unlock(&open_files->lock);
- return ret;
-}
-
-/* Inserts the file in the files_struct, returning the corresponding new file
- * descriptor, or an error code. We start looking for open fds from low_fd. */
-int insert_file(struct files_struct *open_files, struct file *file, int low_fd,
- bool must)
-{
- int slot, ret;
- spin_lock(&open_files->lock);
- if (must) {
- ret = __claim_fd(open_files, low_fd);
- if (ret < 0) {
- spin_unlock(&open_files->lock);
- return ret;
- }
- assert(!ret); /* issues with claim_fd returning status, not the fd */
- slot = low_fd;
- } else {
- slot = __get_fd(open_files, low_fd);
- }
-
+ spin_lock(&fdt->lock);
+ slot = __get_fd(fdt, low_fd, must_use_low);
if (slot < 0) {
- spin_unlock(&open_files->lock);
+ spin_unlock(&fdt->lock);
return slot;
}
- assert(slot < open_files->max_files &&
- open_files->fd[slot].fd_file == 0);
- kref_get(&file->f_kref, 1);
- open_files->fd[slot].fd_file = file;
- open_files->fd[slot].fd_flags = 0;
- spin_unlock(&open_files->lock);
+ assert(slot < fdt->max_files &&
+ fdt->fd[slot].fd_file == 0);
+ if (vfs) {
+ kref_get(&((struct file*)obj)->f_kref, 1);
+ fdt->fd[slot].fd_file = obj;
+ fdt->fd[slot].fd_chan = 0;
+ } else {
+ chan_incref((struct chan*)obj);
+ fdt->fd[slot].fd_file = 0;
+ fdt->fd[slot].fd_chan = obj;
+ }
+ fdt->fd[slot].fd_flags = fd_flags;
+ spin_unlock(&fdt->lock);
return slot;
}
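+
+/* Note that insert_obj_fdt() took its own reference on obj (kref_get or
+ * chan_incref); the caller keeps and must still release its original ref. */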
+/* Inserts the file in the fd_table, returning the corresponding new file
+ * descriptor, or an error code. We start looking for open fds from low_fd.
+ *
+ * Passing cloexec is a bit cheap, since we might want to expand it to support
+ * more FD options in the future. */
+int insert_file(struct fd_table *open_files, struct file *file, int low_fd,
+ bool must, bool cloexec)
+{
+ return insert_obj_fdt(open_files, file, low_fd, cloexec ? FD_CLOEXEC : 0,
+ must, TRUE);
+}
+
/* Closes all open files. Mostly just a "put" for all files. If cloexec, it
- * will only close files that are opened with O_CLOEXEC. */
-void close_all_files(struct files_struct *open_files, bool cloexec)
+ * will only close the FDs with FD_CLOEXEC (opened with O_CLOEXEC or fcntld).
+ *
+ * Notes on concurrency:
+ * - Can't hold spinlocks while we call cclose, since it might sleep eventually.
+ * - We're called from proc_destroy, so we could have concurrent openers trying
+ * to add to the group (other syscalls), hence the "closed" flag.
+ * - dot and slash chans are dealt with in proc_free. It's difficult to close
+ * and zero those with concurrent syscalls, since those are a source of krefs.
+ * - Once we lock and set closed, no further additions can happen. To simplify
+ * our closes, we also allow multiple calls to this func (though that should
+ * never happen with the current code). */
+void close_fdt(struct fd_table *fdt, bool cloexec)
{
struct file *file;
- spin_lock(&open_files->lock);
- if (open_files->closed) {
- spin_unlock(&open_files->lock);
+ struct chan *chan;
+ struct file_desc *to_close;
+ int idx = 0;
+
+ to_close = kzmalloc(sizeof(struct file_desc) * fdt->max_files,
+ MEM_WAIT);
+ spin_lock(&fdt->lock);
+ if (fdt->closed) {
+ spin_unlock(&fdt->lock);
+ kfree(to_close);
return;
}
- for (int i = 0; i < open_files->max_fdset; i++) {
- if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, i)) {
+ for (int i = 0; i < fdt->max_fdset; i++) {
+ if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, i)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
- assert(i < open_files->max_files);
- file = open_files->fd[i].fd_file;
- /* no file == 9ns uses the FD. they will deal with it */
- if (!file)
+ assert(i < fdt->max_files);
+ if (cloexec && !(fdt->fd[i].fd_flags & FD_CLOEXEC))
continue;
- if (cloexec && !(open_files->fd[i].fd_flags & O_CLOEXEC))
- continue;
- /* Actually close the file */
- open_files->fd[i].fd_file = 0;
- assert(file);
- kref_put(&file->f_kref);
- CLR_BITMASK_BIT(open_files->open_fds->fds_bits, i);
+ file = fdt->fd[i].fd_file;
+ chan = fdt->fd[i].fd_chan;
+ to_close[idx].fd_tap = fdt->fd[i].fd_tap;
+ fdt->fd[i].fd_tap = 0;
+ if (file) {
+ fdt->fd[i].fd_file = 0;
+ to_close[idx++].fd_file = file;
+ } else {
+ fdt->fd[i].fd_chan = 0;
+ to_close[idx++].fd_chan = chan;
+ }
+ CLR_BITMASK_BIT(fdt->open_fds->fds_bits, i);
}
}
+ /* it's just a hint, we can build back up from being 0 */
+ fdt->hint_min_fd = 0;
if (!cloexec) {
- free_fd_set(open_files);
- open_files->closed = TRUE;
+ free_fd_set(fdt);
+ fdt->closed = TRUE;
+ }
+ spin_unlock(&fdt->lock);
+ /* We go through some hoops to close/decref outside the lock. Nice for not
+ * holding the lock for a while; critical in case the decref/cclose sleeps
+ * (it can) */
+ for (int i = 0; i < idx; i++) {
+ if (to_close[i].fd_file)
+ kref_put(&to_close[i].fd_file->f_kref);
+ else
+ cclose(to_close[i].fd_chan);
+ if (to_close[i].fd_tap)
+ kref_put(&to_close[i].fd_tap->kref);
}
- spin_unlock(&open_files->lock);
+ kfree(to_close);
}
/* Inserts all of the files from src into dst, used by sys_fork(). */
-void clone_files(struct files_struct *src, struct files_struct *dst)
+void clone_fdt(struct fd_table *src, struct fd_table *dst)
{
struct file *file;
+ struct chan *chan;
+ int ret;
+
spin_lock(&src->lock);
if (src->closed) {
spin_unlock(&src->lock);
spin_unlock(&src->lock);
return;
}
+ while (src->max_files > dst->max_files) {
+ ret = grow_fd_set(dst);
+ if (ret < 0) {
+ set_error(-ret, "Failed to grow for a clone_fdt");
+ spin_unlock(&dst->lock);
+ spin_unlock(&src->lock);
+ return;
+ }
+ }
for (int i = 0; i < src->max_fdset; i++) {
if (GET_BITMASK_BIT(src->open_fds->fds_bits, i)) {
/* while max_files and max_fdset might not line up, we should never
* have a valid fdset higher than files */
assert(i < src->max_files);
file = src->fd[i].fd_file;
+ chan = src->fd[i].fd_chan;
assert(i < dst->max_files && dst->fd[i].fd_file == 0);
SET_BITMASK_BIT(dst->open_fds->fds_bits, i);
dst->fd[i].fd_file = file;
- /* no file means 9ns is using it, they clone separately */
+ dst->fd[i].fd_chan = chan;
if (file)
kref_get(&file->f_kref, 1);
- if (i >= dst->next_fd)
- dst->next_fd = i + 1;
+ else
+ chan_incref(chan);
}
}
+ dst->hint_min_fd = src->hint_min_fd;
spin_unlock(&dst->lock);
spin_unlock(&src->lock);
}
kbuf[cwd_l - 2] = '/';
/* for each dentry in the path, all the way back to the root of fs_env, we
* grab the dentry name, push path_start back enough, and write in the name,
- * using /'s to terminate. We skip the root, since we don't want it's
+ * using /'s to terminate. We skip the root, since we don't want its
* actual name, just "/", which is set before each loop. */
path_start = kbuf + cwd_l - 2; /* the last byte written */
while (dentry != fs_env->root) {
return 0;
}
path_start -= link_len;
- strncpy(path_start, dentry->d_name.name, link_len);
+ memmove(path_start, dentry->d_name.name, link_len);
path_start--;
*path_start = '/';
- dentry = dentry->d_parent;
+ dentry = dentry->d_parent;
}
return path_start;
}
default:
warn("Look around you! Unknown filetype!");
}
- kref_put(&child_d->d_kref);
+ kref_put(&child_d->d_kref);
}
loop_next:
if (retval <= 0)