1 /* Copyright (c) 2009, 2010 The Regents of the University of California
2 * Barret Rhoden <brho@cs.berkeley.edu>
3 * See LICENSE for details.
5 * Default implementations and global values for the VFS. */
7 #include <vfs.h> // keep this first
23 struct sb_tailq super_blocks = TAILQ_HEAD_INITIALIZER(super_blocks);
24 spinlock_t super_blocks_lock = SPINLOCK_INITIALIZER;
25 struct fs_type_tailq file_systems = TAILQ_HEAD_INITIALIZER(file_systems);
26 struct namespace default_ns;
28 struct kmem_cache *dentry_kcache; // not to be confused with the dcache
29 struct kmem_cache *inode_kcache;
30 struct kmem_cache *file_kcache;
38 /* mtime implies ctime implies atime. */
39 static void set_acmtime(struct inode *inode, int which)
41 struct timespec now = nsec2timespec(epoch_nsec());
45 inode->i_mtime.tv_sec = now.tv_sec;
46 inode->i_mtime.tv_nsec = now.tv_nsec;
49 inode->i_ctime.tv_sec = now.tv_sec;
50 inode->i_ctime.tv_nsec = now.tv_nsec;
53 inode->i_atime.tv_sec = now.tv_sec;
54 inode->i_atime.tv_nsec = now.tv_nsec;
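/* Illustrative note (a sketch, not in the original): per the comment above,
 * the 'which' argument names the most specific timestamp to bump and the
 * implied ones come along, e.g.
 *	set_acmtime(inode, VFS_ATIME);	// atime only
 *	set_acmtime(inode, VFS_CTIME);	// ctime and atime
 *	set_acmtime(inode, VFS_MTIME);	// mtime, ctime, and atime
 */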
58 /* Mounts fs from dev_name at mnt_pt in namespace ns. There could be no mnt_pt,
59 * such as with the root of (the default) namespace. Not sure how it would work
60 * with multiple namespaces on the same FS yet. Note if you mount the same FS
61 * multiple times, you only have one FS still (and one SB). If we ever support
63 struct vfsmount *__mount_fs(struct fs_type *fs, char *dev_name,
64 struct dentry *mnt_pt, int flags,
67 struct super_block *sb;
68 struct vfsmount *vmnt = kmalloc(sizeof(struct vfsmount), 0);
70 /* this first ref is stored in the NS tailq below */
71 kref_init(&vmnt->mnt_kref, fake_release, 1);
72 /* Build the vfsmount, if there is no mnt_pt, mnt is the root vfsmount (for
73 * now). fields related to the actual FS, like the sb and the mnt_root are
74 * set in the fs-specific get_sb() call. */
76 vmnt->mnt_parent = NULL;
77 vmnt->mnt_mountpoint = NULL;
78 } else { /* common case, but won't be tested til we try to mount another FS */
79 mnt_pt->d_mount_point = TRUE;
80 mnt_pt->d_mounted_fs = vmnt;
81 kref_get(&vmnt->mnt_kref, 1); /* held by mnt_pt */
82 vmnt->mnt_parent = mnt_pt->d_sb->s_mount;
83 vmnt->mnt_mountpoint = mnt_pt;
85 TAILQ_INIT(&vmnt->mnt_child_mounts);
86 vmnt->mnt_flags = flags;
87 vmnt->mnt_devname = dev_name;
88 vmnt->mnt_namespace = ns;
89 kref_get(&ns->kref, 1); /* held by vmnt */
91 /* Read in / create the SB */
92 sb = fs->get_sb(fs, flags, dev_name, vmnt);
94 panic("Your FS sucks");
96 /* TODO: consider moving this into get_sb or something, in case the SB
97 * already exists (mounting again) (if we support that) */
98 spin_lock(&super_blocks_lock);
99 TAILQ_INSERT_TAIL(&super_blocks, sb, s_list); /* storing a ref here... */
100 spin_unlock(&super_blocks_lock);
102 /* Update holding NS */
103 spin_lock(&ns->lock);
104 TAILQ_INSERT_TAIL(&ns->vfsmounts, vmnt, mnt_list);
105 spin_unlock(&ns->lock);
106 /* note to self: so, right after this point, the NS points to the root FS
107 * mount (we return the mnt, which gets assigned), the root mnt has a dentry
108 * for /, backed by an inode, with a SB prepped and in memory. */
116 dentry_kcache = kmem_cache_create("dentry", sizeof(struct dentry),
117 __alignof__(struct dentry), 0,
119 inode_kcache = kmem_cache_create("inode", sizeof(struct inode),
120 __alignof__(struct inode), 0, NULL,
122 file_kcache = kmem_cache_create("file", sizeof(struct file),
123 __alignof__(struct file), 0, NULL, 0,
125 /* default NS never dies, +1 to exist */
126 kref_init(&default_ns.kref, fake_release, 1);
127 spinlock_init(&default_ns.lock);
128 default_ns.root = NULL;
129 TAILQ_INIT(&default_ns.vfsmounts);
131 /* build list of all FS's in the system. put yours here. if this is ever
132 * done on the fly, we'll need to lock. */
133 TAILQ_INSERT_TAIL(&file_systems, &kfs_fs_type, list);
135 TAILQ_INSERT_TAIL(&file_systems, &ext2_fs_type, list);
137 TAILQ_FOREACH(fs, &file_systems, list)
138 printk("Supports the %s Filesystem\n", fs->name);
140 /* mounting KFS at the root (/), pending root= parameters */
141 // TODO: linux creates a temp root_fs, then mounts the real root onto that
142 default_ns.root = __mount_fs(&kfs_fs_type, "RAM", NULL, 0, &default_ns);
144 printk("vfs_init() completed\n");
147 /* FS's can provide another, if they want */
148 int generic_dentry_hash(struct dentry *dentry, struct qstr *qstr)
150 unsigned long hash = 5381;
152 for (int i = 0; i < qstr->len; i++) {
153 /* hash * 33 + c, djb2's technique */
154 hash = ((hash << 5) + hash) + qstr->name[i];
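/* Worked example (illustrative): for the two-byte name "ab",
 * hash = 5381 * 33 + 'a' = 177670, then 177670 * 33 + 'b' = 5863208. */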
159 /* Builds / populates the qstr of a dentry based on its d_iname. If there is an
160 * l_name (a long name), it will use that instead of the inline name. This will
161 * probably change a bit. */
162 void qstr_builder(struct dentry *dentry, char *l_name)
164 dentry->d_name.name = l_name ? l_name : dentry->d_iname;
165 dentry->d_name.len = strnlen(dentry->d_name.name, MAX_FILENAME_SZ);
166 dentry->d_name.hash = dentry->d_op->d_hash(dentry, &dentry->d_name);
169 /* Useful little helper - return the string ptr for a given file */
170 char *file_name(struct file *file)
172 return file->f_dentry->d_name.name;
175 static int prepend(char **pbuf, size_t *pbuflen, const char *str, size_t len)
178 return -ENAMETOOLONG;
181 memcpy(*pbuf, str, len);
186 char *dentry_path(struct dentry *dentry, char *path, size_t max_size)
188 size_t csize = max_size;
189 char *path_start = path + max_size, *base;
191 if (prepend(&path_start, &csize, "\0", 1) < 0 || csize < 1)
193 /* Handle the case that the passed dentry is the root. */
194 base = path_start - 1;
196 while (!DENTRY_IS_ROOT(dentry)) {
197 if (prepend(&path_start, &csize, dentry->d_name.name,
198 dentry->d_name.len) < 0 ||
199 prepend(&path_start, &csize, "/", 1) < 0)
202 dentry = dentry->d_parent;
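/* Hypothetical usage sketch (not part of the original): the returned pointer
 * aims into the caller's buffer, since the path is assembled from the end.
 *
 *	char buf[MAX_FILENAME_SZ];
 *	char *p = dentry_path(dentry, buf, sizeof(buf));
 *	if (p)
 *		printk("Full path: %s\n", p);
 */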
208 /* Some issues with this, coupled closely to fs_lookup.
210 * Note the use of __dentry_free, instead of kref_put. In those cases, we don't
211 * want to treat it like a kref and we have the only reference to it, so it is
212 * okay to do this. It makes dentry_release() easier too. */
213 static struct dentry *do_lookup(struct dentry *parent, char *name)
215 struct dentry *result, *query;
216 query = get_dentry(parent->d_sb, parent, name);
218 warn("OOM in do_lookup(), probably wasn't expected\n");
221 result = dcache_get(parent->d_sb, query);
223 __dentry_free(query);
226 /* No result, check for negative */
227 if (query->d_flags & DENTRY_NEGATIVE) {
228 __dentry_free(query);
231 /* not in the dcache at all, need to consult the FS */
232 result = parent->d_inode->i_op->lookup(parent->d_inode, query, 0);
234 /* Note the USED flag will get turned off when this gets added to the
235 * LRU in dentry_release(). There's a slight race here that we'll panic
236 * on, but I want to catch it (in dcache_put()) for now. */
237 query->d_flags |= DENTRY_NEGATIVE;
238 dcache_put(parent->d_sb, query);
239 kref_put(&query->d_kref);
242 dcache_put(parent->d_sb, result);
243 /* This is because KFS doesn't return the same dentry, but ext2 does. This
244 * is ugly and needs to be fixed. (TODO) */
246 __dentry_free(query);
248 /* TODO: if the following are done by us, how do we know the i_ino?
249 * also need to handle inodes that are already read in! For now, we're
250 * going to have the FS handle it in its lookup() method:
252 * - read in the inode
253 * - put in the inode cache */
257 /* Update ND such that it represents having followed dentry. IAW the nd
258 * refcnting rules, we need to decref any references that were in there before
259 * they get clobbered. */
260 static int next_link(struct dentry *dentry, struct nameidata *nd)
262 assert(nd->dentry && nd->mnt);
263 /* update the dentry */
264 kref_get(&dentry->d_kref, 1);
265 kref_put(&nd->dentry->d_kref);
267 /* update the mount, if we need to */
268 if (dentry->d_sb->s_mount != nd->mnt) {
269 kref_get(&dentry->d_sb->s_mount->mnt_kref, 1);
270 kref_put(&nd->mnt->mnt_kref);
271 nd->mnt = dentry->d_sb->s_mount;
276 /* Walk up one directory, being careful of mountpoints, namespaces, and the top
278 static int climb_up(struct nameidata *nd)
280 printd("CLIMB_UP, from %s\n", nd->dentry->d_name.name);
281 /* Top of the world, just return. Should also check for being at the top of
282 * the current process's namespace (TODO) */
283 if (!nd->dentry->d_parent || (nd->dentry->d_parent == nd->dentry))
285 /* Check if we are at the top of a mount, if so, we need to follow
286 * backwards, and then climb_up from that one. We might need to climb
287 * multiple times if we mount multiple FSs at the same spot (highly
288 * unlikely). This is completely untested. Might recurse instead. */
289 while (nd->mnt->mnt_root == nd->dentry) {
290 if (!nd->mnt->mnt_parent) {
291 warn("Might have expected a parent vfsmount (dentry had a parent)");
294 next_link(nd->mnt->mnt_mountpoint, nd);
296 /* Backwards walk (no mounts or any other issues now). */
297 next_link(nd->dentry->d_parent, nd);
298 printd("CLIMB_UP, to %s\n", nd->dentry->d_name.name);
302 /* nd->dentry might be on a mount point, so we need to move on to the child
304 static int follow_mount(struct nameidata *nd)
306 if (!nd->dentry->d_mount_point)
308 next_link(nd->dentry->d_mounted_fs->mnt_root, nd);
312 static int link_path_walk(char *path, struct nameidata *nd);
314 /* When nd->dentry is for a symlink, this will recurse and follow that symlink,
315 * so that nd contains the results of following the symlink (dentry and mnt).
316 * Returns 0 when it isn't a symlink, 1 on following a link, and < 0 on error. */
317 static int follow_symlink(struct nameidata *nd)
321 if (!S_ISLNK(nd->dentry->d_inode->i_mode))
323 if (nd->depth > MAX_SYMLINK_DEPTH)
325 printd("Following symlink for dentry %p %s\n", nd->dentry,
326 nd->dentry->d_name.name);
328 symname = nd->dentry->d_inode->i_op->readlink(nd->dentry);
329 /* We need to pin in nd->dentry (the dentry of the symlink), since we need
330 * its symname's storage to stay in memory throughout the upcoming
331 * link_path_walk(). The last_sym gets decreffed when we path_release() or
332 * follow another symlink. */
334 kref_put(&nd->last_sym->d_kref);
335 kref_get(&nd->dentry->d_kref, 1);
336 nd->last_sym = nd->dentry;
337 /* If this is an absolute path in the symlink, we need to free the old path and
338 * start over, otherwise, we continue from the PARENT of nd (the symlink) */
339 if (symname[0] == '/') {
342 nd->dentry = default_ns.root->mnt_root;
344 nd->dentry = current->fs_env.root;
345 nd->mnt = nd->dentry->d_sb->s_mount;
346 kref_get(&nd->mnt->mnt_kref, 1);
347 kref_get(&nd->dentry->d_kref, 1);
351 /* either way, keep on walking in the free world! */
352 retval = link_path_walk(symname, nd);
353 return (retval == 0 ? 1 : retval);
356 /* Little helper, to make it easier to break out of the nested loops. Will also
357 * '\0' out the first slash if it's slashes all the way down. Or turtles. */
358 static bool packed_trailing_slashes(char *first_slash)
360 for (char *i = first_slash; *i == '/'; i++) {
361 if (*(i + 1) == '\0') {
369 /* Simple helper to set nd to track its last name as 'name'. Also be careful
370 * with the storage of name. Don't use an nd's name past the lifetime of the
371 * string used in the path_lookup()/link_path_walk/whatever. Consider replacing
372 * parts of this with a qstr builder. Note this uses the dentry's d_op, which
373 * might not be the dentry we care about. */
374 static void stash_nd_name(struct nameidata *nd, char *name)
376 nd->last.name = name;
377 nd->last.len = strlen(name);
378 nd->last.hash = nd->dentry->d_op->d_hash(nd->dentry, &nd->last);
381 /* Resolves the links in a basic path walk. 0 for success, -EWHATEVER
382 * otherwise. The final lookup is returned via nd. */
383 static int link_path_walk(char *path, struct nameidata *nd)
385 struct dentry *link_dentry;
386 struct inode *link_inode, *nd_inode;
391 /* Prevent crazy recursion */
392 if (nd->depth > MAX_SYMLINK_DEPTH)
394 /* skip all leading /'s */
397 /* if there's nothing left (null terminated), we're done. This should only
398 * happen for "/", which if we wanted a PARENT, should fail (there is no
401 if (nd->flags & LOOKUP_PARENT) {
405 /* o/w, we're good */
408 /* iterate through each intermediate link of the path. in general, nd
409 * tracks where we are in the path, as far as dentries go. once we have the
410 * next dentry, we try to update nd based on that dentry. link is the part
411 * of the path string that we are looking up */
413 nd_inode = nd->dentry->d_inode;
414 if ((error = check_perms(nd_inode, nd->intent)))
416 /* find the next link, break out if it is the end */
417 next_slash = strchr(link, '/');
421 if (packed_trailing_slashes(next_slash)) {
422 nd->flags |= LOOKUP_DIRECTORY;
426 /* skip over any interim ./ */
427 if (!strncmp("./", link, 2))
429 /* Check for "../", walk up */
430 if (!strncmp("../", link, 3)) {
435 link_dentry = do_lookup(nd->dentry, link);
439 /* make link_dentry the current step/answer */
440 next_link(link_dentry, nd);
441 kref_put(&link_dentry->d_kref); /* do_lookup gave us a refcnt dentry */
442 /* we could be on a mountpoint or a symlink - need to follow them */
444 if ((error = follow_symlink(nd)) < 0)
446 /* Turn off a possible DIRECTORY lookup, which could have been set
447 * during the follow_symlink (a symlink could have had a directory at
448 * the end), though it was in the middle of the real path. */
449 nd->flags &= ~LOOKUP_DIRECTORY;
450 if (!S_ISDIR(nd->dentry->d_inode->i_mode))
453 /* move through the path string to the next entry */
454 link = next_slash + 1;
455 /* advance past any other interim slashes. we know we won't hit the end
456 * due to the for loop check above */
460 /* Now, we're on the last link of the path. We need to deal with . and
461 * .. . This might be weird with PARENT lookups - not sure what semantics
462 * we want exactly. This will give the parent of whatever the PATH was
463 * supposed to look like. Note that ND currently points to the parent of
464 * the last item (link). */
465 if (!strcmp(".", link)) {
466 if (nd->flags & LOOKUP_PARENT) {
467 assert(nd->dentry->d_name.name);
468 stash_nd_name(nd, nd->dentry->d_name.name);
473 if (!strcmp("..", link)) {
475 if (nd->flags & LOOKUP_PARENT) {
476 assert(nd->dentry->d_name.name);
477 stash_nd_name(nd, nd->dentry->d_name.name);
482 /* need to attempt to look it up, in case it's a symlink */
483 link_dentry = do_lookup(nd->dentry, link);
485 /* if there's no dentry, we are okay if we are looking for the parent */
486 if (nd->flags & LOOKUP_PARENT) {
487 assert(strcmp(link, ""));
488 stash_nd_name(nd, link);
494 next_link(link_dentry, nd);
495 kref_put(&link_dentry->d_kref); /* do_lookup gave us a refcnt'd dentry */
496 /* at this point, nd is on the final link, but it might be a symlink */
497 if (nd->flags & LOOKUP_FOLLOW) {
498 error = follow_symlink(nd);
501 /* if we actually followed a symlink, then nd is set and we're done */
505 /* One way or another, nd is on the last element of the path, symlinks and
506 * all. Now we need to climb up to set nd back on the parent, if that's
508 if (nd->flags & LOOKUP_PARENT) {
509 assert(nd->dentry->d_name.name);
510 stash_nd_name(nd, link_dentry->d_name.name);
514 /* now, we have the dentry set, and don't want the parent, but might be on a
515 * mountpoint still. FYI: this hasn't been thought through completely. */
517 /* If we wanted a directory, but didn't get one, error out */
518 if ((nd->flags & LOOKUP_DIRECTORY) && !S_ISDIR(nd->dentry->d_inode->i_mode))
523 /* Given path, return the inode for the final dentry. The ND should be
524 * initialized for the first call - specifically, we need the intent.
525 * LOOKUP_PARENT and friends go in the flags var, which is not the intent.
527 * If path_lookup wants a PARENT, but hits the top of the FS (root or
528 * otherwise), we want it to error out. It's still unclear how we want to
529 * handle processes with roots that aren't root, but at the very least, we don't
530 * want to think we have the parent of /, but have / itself. Due to the way
531 * link_path_walk works, if that happened, we probably don't have a
532 * nd->last.name. This needs more thought (TODO).
534 * Need to be careful too. While the path has been copied-in to the kernel,
535 * it's still user input. */
536 int path_lookup(char *path, int flags, struct nameidata *nd)
539 printd("Path lookup for %s\n", path);
540 /* we allow absolute lookups with no process context */
541 /* TODO: RCU read lock on pwd or kref_not_zero in a loop. concurrent chdir
542 * could decref nd->dentry before we get to incref it below. */
543 if (path[0] == '/') { /* absolute lookup */
545 nd->dentry = default_ns.root->mnt_root;
547 nd->dentry = current->fs_env.root;
548 } else { /* relative lookup */
550 /* Don't need to lock on the fs_env since we're reading one item */
551 nd->dentry = current->fs_env.pwd;
553 nd->mnt = nd->dentry->d_sb->s_mount;
554 /* Whenever references get put in the nd, incref them. Whenever they are
555 * removed, decref them. */
556 kref_get(&nd->mnt->mnt_kref, 1);
557 kref_get(&nd->dentry->d_kref, 1);
559 nd->depth = 0; /* used in symlink following */
560 retval = link_path_walk(path, nd);
561 /* make sure our PARENT lookup worked */
562 if (!retval && (flags & LOOKUP_PARENT))
563 assert(nd->last.name);
567 /* Call this after any use of path_lookup when you are done with its results,
568 * regardless of whether it succeeded or not. It will free any references */
569 void path_release(struct nameidata *nd)
571 kref_put(&nd->dentry->d_kref);
572 kref_put(&nd->mnt->mnt_kref);
573 /* Free the last symlink dentry used, if there was one */
575 kref_put(&nd->last_sym->d_kref);
576 nd->last_sym = 0; /* catch reuse bugs */
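/* Illustrative calling pattern (a sketch; compare do_access() further down).
 * 'some_path' is a stand-in.  Per the comment above, path_release() is called
 * whether or not the lookup succeeded. */
#if 0
	struct nameidata nd_r = {0}, *nd = &nd_r;
	int error;

	nd->intent = LOOKUP_OPEN;
	error = path_lookup(some_path, LOOKUP_FOLLOW, nd);
	if (!error)
		printd("Resolved to dentry %s\n", nd->dentry->d_name.name);
	path_release(nd);
#endif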
580 /* External version of mount, only call this after having a / mount */
581 int mount_fs(struct fs_type *fs, char *dev_name, char *path, int flags)
583 struct nameidata nd_r = {0}, *nd = &nd_r;
585 retval = path_lookup(path, LOOKUP_DIRECTORY, nd);
588 /* taking the namespace of the vfsmount of path */
589 if (!__mount_fs(fs, dev_name, nd->dentry, flags, nd->mnt->mnt_namespace))
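/* Hypothetical usage (a sketch): once / is mounted, a second FS could be
 * attached at an existing directory, e.g.
 *	mount_fs(&ext2_fs_type, "/dev/some_bdev", "/mnt", 0);
 * where "/dev/some_bdev" and "/mnt" are stand-ins for a real device name and
 * an existing directory to use as the mount point. */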
596 /* Superblock functions */
598 /* Dentry "hash" function for the hash table to use. Since we already have the
599 * hash in the qstr, we don't need to rehash. Also, note we'll be using the
600 * dentry in question as both the key and the value. */
601 static size_t __dcache_hash(void *k)
603 return (size_t)((struct dentry*)k)->d_name.hash;
606 /* Dentry cache hashtable equality function. This means we need to pass in some
607 * minimal dentry when doing a lookup. */
608 static ssize_t __dcache_eq(void *k1, void *k2)
610 if (((struct dentry*)k1)->d_parent != ((struct dentry*)k2)->d_parent)
612 /* TODO: use the FS-specific string comparison */
613 return !strcmp(((struct dentry*)k1)->d_name.name,
614 ((struct dentry*)k2)->d_name.name);
617 /* Helper to alloc and initialize a generic superblock. This handles all the
618 * VFS related things, like lists. Each FS will need to handle its own things
619 * in its *_get_sb(), usually involving reading off the disc. */
620 struct super_block *get_sb(void)
622 struct super_block *sb = kmalloc(sizeof(struct super_block), 0);
624 spinlock_init(&sb->s_lock);
625 kref_init(&sb->s_kref, fake_release, 1); /* for the ref passed out */
626 TAILQ_INIT(&sb->s_inodes);
627 TAILQ_INIT(&sb->s_dirty_i);
628 TAILQ_INIT(&sb->s_io_wb);
629 TAILQ_INIT(&sb->s_lru_d);
630 TAILQ_INIT(&sb->s_files);
631 sb->s_dcache = create_hashtable(100, __dcache_hash, __dcache_eq);
632 sb->s_icache = create_hashtable(100, __generic_hash, __generic_eq);
633 spinlock_init(&sb->s_lru_lock);
634 spinlock_init(&sb->s_dcache_lock);
635 spinlock_init(&sb->s_icache_lock);
636 sb->s_fs_info = 0; // can override somewhere else
640 /* Final stages of initializing a super block, including creating and linking
641 * the root dentry, root inode, vmnt, and sb. The d_op and root_ino are
642 * FS-specific, but otherwise it's FS-independent, tricky, and not worth having
643 * around multiple times.
645 * Not the world's best interface, so it's subject to change, esp since we're
646 * passing (now 3) FS-specific things. */
647 void init_sb(struct super_block *sb, struct vfsmount *vmnt,
648 struct dentry_operations *d_op, unsigned long root_ino,
651 /* Build and init the first dentry / inode. The dentry ref is stored later
652 * by vfsmount's mnt_root. The parent is dealt with later. */
653 struct dentry *d_root = get_dentry_with_ops(sb, 0, "/", d_op);
656 panic("OOM! init_sb() can't fail yet!");
657 /* a lot of what's here on down is normally done in lookup() or create(), since
658 * get_dentry isn't a fully usable dentry. The two FS-specific settings are
659 * normally inherited from a parent within the same FS in get_dentry, but we
662 d_root->d_fs_info = d_fs_info;
663 struct inode *inode = get_inode(d_root);
665 panic("This FS sucks!");
666 inode->i_ino = root_ino;
667 /* TODO: add the inode to the appropriate list (off i_list) */
668 /* TODO: do we need to read in the inode? can we do this on demand? */
669 /* if this FS is already mounted, we'll need to do something different. */
670 sb->s_op->read_inode(inode);
671 icache_put(sb, inode);
672 /* Link the dentry and SB to the VFS mount */
673 vmnt->mnt_root = d_root; /* ref comes from get_dentry */
675 /* If there is no mount point, there is no parent. This is true only for
677 if (vmnt->mnt_mountpoint) {
678 kref_get(&vmnt->mnt_mountpoint->d_kref, 1); /* held by d_root */
679 d_root->d_parent = vmnt->mnt_mountpoint; /* dentry of the root */
681 d_root->d_parent = d_root; /* set root as its own parent */
683 /* insert the dentry into the dentry cache. when's the earliest we can?
684 * when's the earliest we should? what about concurrent accesses to the
685 * same dentry? should be locking the dentry... */
686 dcache_put(sb, d_root);
687 kref_put(&inode->i_kref); /* give up the ref from get_inode() */
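/* Sketch of how an FS-specific get_sb() is expected to use get_sb()/init_sb()
 * above.  The shape is assumed (loosely modeled on the in-tree FSs); all
 * "myfs" names are hypothetical. */
#if 0
struct super_block *myfs_get_sb(struct fs_type *fs, int flags, char *dev_name,
                                struct vfsmount *vmnt)
{
	struct super_block *sb = get_sb();

	/* FS-specific fields: s_blocksize, s_op, s_bdev, etc. */
	sb->s_op = &myfs_s_op;
	init_sb(sb, vmnt, &myfs_d_op, MYFS_ROOT_INO, 0);
	return sb;
}
#endif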
690 /* Dentry Functions */
692 static void dentry_set_name(struct dentry *dentry, char *name)
694 size_t name_len = strnlen(name, MAX_FILENAME_SZ); /* not including \0! */
696 if (name_len < DNAME_INLINE_LEN) {
697 strlcpy(dentry->d_iname, name, name_len + 1);
698 qstr_builder(dentry, 0);
700 l_name = kmalloc(name_len + 1, 0);
702 strlcpy(l_name, name, name_len + 1);
703 qstr_builder(dentry, l_name);
707 /* Gets a dentry. If there is no parent, use d_op. Only called directly by
708 * superblock init code. */
709 struct dentry *get_dentry_with_ops(struct super_block *sb,
710 struct dentry *parent, char *name,
711 struct dentry_operations *d_op)
714 struct dentry *dentry = kmem_cache_alloc(dentry_kcache, 0);
720 //memset(dentry, 0, sizeof(struct dentry));
721 kref_init(&dentry->d_kref, dentry_release, 1); /* this ref is returned */
722 spinlock_init(&dentry->d_lock);
723 TAILQ_INIT(&dentry->d_subdirs);
725 kref_get(&sb->s_kref, 1);
726 dentry->d_sb = sb; /* storing a ref here... */
727 dentry->d_mount_point = FALSE;
728 dentry->d_mounted_fs = 0;
729 if (parent) { /* no parent for rootfs mount */
730 kref_get(&parent->d_kref, 1);
731 dentry->d_op = parent->d_op; /* d_op set in init_sb for parentless */
735 dentry->d_parent = parent;
736 dentry->d_flags = DENTRY_USED;
737 dentry->d_fs_info = 0;
738 dentry_set_name(dentry, name);
739 /* Catch bugs by aggressively zeroing this (o/w we use old stuff) */
744 /* Helper to alloc and initialize a generic dentry. The following needs to be
745 * set still: d_op (if no parent), d_fs_info (opt), d_inode, connect the inode
746 * to the dentry (and up the d_kref again), maybe dcache_put(). The inode
747 * stitching is done in get_inode() or lookup (depending on the FS).
748 * The setting of the d_op might be problematic when dealing with mounts. Just
751 * If the name is longer than the inline name, it will kmalloc a buffer, so
752 * don't worry about the storage for *name after calling this. */
753 struct dentry *get_dentry(struct super_block *sb, struct dentry *parent,
756 return get_dentry_with_ops(sb, parent, name, 0);
759 /* Called when the dentry is unreferenced (after kref == 0). This works closely
760 * with the resurrection in dcache_get().
762 * The dentry is still in the dcache, but needs to be un-USED and added to the
763 * LRU dentry list. Even dentries that were used in a failed lookup need to be
764 * cached - they ought to be the negative dentries. Note that all dentries have
765 * parents, even negative ones (it is needed to find it in the dcache). */
766 void dentry_release(struct kref *kref)
768 struct dentry *dentry = container_of(kref, struct dentry, d_kref);
770 printd("'Releasing' dentry %p: %s\n", dentry, dentry->d_name.name);
771 /* DYING dentries (recently unlinked / rmdir'd) just get freed */
772 if (dentry->d_flags & DENTRY_DYING) {
773 __dentry_free(dentry);
776 /* This lock ensures the USED state and the TAILQ membership is in sync.
777 * Also used to check the refcnt, though that might not be necessary. */
778 spin_lock(&dentry->d_lock);
779 /* While locked, we need to double check the kref, in case someone already
780 * reup'd it. Re-up? you're crazy! Reee-up, you're outta yo mind! */
781 if (!kref_refcnt(&dentry->d_kref)) {
782 /* Note this is where negative dentries get set UNUSED */
783 if (dentry->d_flags & DENTRY_USED) {
784 dentry->d_flags &= ~DENTRY_USED;
785 spin_lock(&dentry->d_sb->s_lru_lock);
786 TAILQ_INSERT_TAIL(&dentry->d_sb->s_lru_d, dentry, d_lru);
787 spin_unlock(&dentry->d_sb->s_lru_lock);
789 /* and make sure it wasn't USED, then UNUSED again */
790 /* TODO: think about issues with this */
791 warn("This should be rare. Tell brho this happened.");
794 spin_unlock(&dentry->d_lock);
797 /* Called when we really dealloc and get rid of a dentry (like when it is
798 * removed from the dcache, either for memory or correctness reasons)
800 * This has to handle two types of dentries: full ones (ones that had been used)
801 * and ones that had been just for lookups - hence the check for d_inode.
803 * Note that dentries pin and kref their inodes. When all the dentries are
804 * gone, we want the inode to be released via kref. The inode has internal /
805 * weak references to the dentry, which are not refcounted. */
806 void __dentry_free(struct dentry *dentry)
809 printd("Freeing dentry %p: %s\n", dentry, dentry->d_name.name);
810 assert(dentry->d_op); /* catch bugs. a while back, some lacked d_op */
811 dentry->d_op->d_release(dentry);
812 /* Boundary mirrors dentry_set_name(): names with len >= DNAME_INLINE_LEN were kmalloc'd */
813 if (dentry->d_name.len >= DNAME_INLINE_LEN)
814 kfree((void*)dentry->d_name.name);
815 kref_put(&dentry->d_sb->s_kref);
816 if (dentry->d_parent)
817 kref_put(&dentry->d_parent->d_kref);
818 if (dentry->d_mounted_fs)
819 kref_put(&dentry->d_mounted_fs->mnt_kref);
820 if (dentry->d_inode) {
821 TAILQ_REMOVE(&dentry->d_inode->i_dentry, dentry, d_alias);
822 kref_put(&dentry->d_inode->i_kref); /* dentries kref inodes */
824 kmem_cache_free(dentry_kcache, dentry);
827 /* Looks up the dentry for the given path, returning a refcnt'd dentry (or 0).
828 * Permissions are applied for the current user, which is quite a broken system
829 * at the moment. Flags are lookup flags. */
830 struct dentry *lookup_dentry(char *path, int flags)
832 struct dentry *dentry;
833 struct nameidata nd_r = {0}, *nd = &nd_r;
836 error = path_lookup(path, flags, nd);
843 kref_get(&dentry->d_kref, 1);
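/* Illustrative caller (a sketch; compare do_link() below): the returned dentry
 * is refcounted, so pair a successful lookup with a kref_put():
 *
 *	struct dentry *d = lookup_dentry(some_path, LOOKUP_FOLLOW);
 *	if (!d)
 *		return -1;	// errno was set by lookup_dentry
 *	... use d ...
 *	kref_put(&d->d_kref);
 */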
848 /* Get a dentry from the dcache. At a minimum, we need the name hash and parent
849 * in what_i_want, though most uses will probably be from a get_dentry() call.
850 * We pass in the SB in the off chance that we don't want to use a get'd dentry.
852 * The unusual variable name (instead of just "key" or something) is named after
853 * ex-SPC Castro's porn folder. Caller deals with the memory for what_i_want.
855 * If the dentry is negative, we don't return the actual result - instead, we
856 * set the negative flag in 'what i want'. The reason is we don't want to
857 * kref_get() and then immediately put (causing dentry_release()). This also
858 * means that dentry_release() should never get someone who wasn't USED (barring
859 * the race, which it handles). And we don't need to ever have a dentry set as
860 * USED and NEGATIVE (which is always wrong, but would be needed for a cleaner
863 * This is where we do the "kref resurrection" - we are returning a kref'd
864 * object, even if it wasn't kref'd before. This means the dcache does NOT hold
865 * krefs (it is a weak/internal ref), but it is a source of kref generation. We
866 * sync up with the possible freeing of the dentry by locking the table. See
867 * Doc/kref for more info. */
868 struct dentry *dcache_get(struct super_block *sb, struct dentry *what_i_want)
870 struct dentry *found;
871 /* This lock protects the hash, as well as ensures the returned object
872 * doesn't get deleted/freed out from under us */
873 spin_lock(&sb->s_dcache_lock);
874 found = hashtable_search(sb->s_dcache, what_i_want);
876 if (found->d_flags & DENTRY_NEGATIVE) {
877 what_i_want->d_flags |= DENTRY_NEGATIVE;
878 spin_unlock(&sb->s_dcache_lock);
881 spin_lock(&found->d_lock);
882 __kref_get(&found->d_kref, 1); /* prob could be done outside the lock*/
883 /* If we're here (after kreffing) and it is not USED, we are the one who
884 * should resurrect */
885 if (!(found->d_flags & DENTRY_USED)) {
886 found->d_flags |= DENTRY_USED;
887 spin_lock(&sb->s_lru_lock);
888 TAILQ_REMOVE(&sb->s_lru_d, found, d_lru);
889 spin_unlock(&sb->s_lru_lock);
891 spin_unlock(&found->d_lock);
893 spin_unlock(&sb->s_dcache_lock);
897 /* Adds a dentry to the dcache. Note the *dentry is both the key and the value.
898 * If the value was already in there (which can happen iff it was negative), for
899 * now we'll remove it and put the new one in there. */
900 void dcache_put(struct super_block *sb, struct dentry *key_val)
904 spin_lock(&sb->s_dcache_lock);
905 old = hashtable_remove(sb->s_dcache, key_val);
906 /* if it is old and non-negative, our caller lost a race with someone else
907 * adding the dentry. but since we yanked it out, like a bunch of idiots,
908 * we still have to put it back. should be fairly rare. */
909 if (old && (old->d_flags & DENTRY_NEGATIVE)) {
910 /* This is possible, but rare for now (about to be put on the LRU) */
911 assert(!(old->d_flags & DENTRY_USED));
912 assert(!kref_refcnt(&old->d_kref));
913 spin_lock(&sb->s_lru_lock);
914 TAILQ_REMOVE(&sb->s_lru_d, old, d_lru);
915 spin_unlock(&sb->s_lru_lock);
916 /* TODO: this seems suspect. isn't this the same memory as key_val?
917 * in which case, we just adjust the flags (remove NEG) and reinsert? */
918 assert(old != key_val); // checking TODO comment
921 /* this returns 0 on failure (TODO: Fix this ghetto shit) */
922 retval = hashtable_insert(sb->s_dcache, key_val, key_val);
924 spin_unlock(&sb->s_dcache_lock);
927 /* Will remove and return the dentry. Caller deallocs the key, but the retval
928 * won't have a reference. Returns 0 if it wasn't found. Callers can't
929 * assume much - they should not use the reference they *get back* (if they
930 * already had one for key, they can use that). There may be other users out
932 struct dentry *dcache_remove(struct super_block *sb, struct dentry *key)
934 struct dentry *retval;
935 spin_lock(&sb->s_dcache_lock);
936 retval = hashtable_remove(sb->s_dcache, key);
937 spin_unlock(&sb->s_dcache_lock);
941 /* This will clean out the LRU list, which are the unused dentries of the dentry
942 * cache. This will optionally only free the negative ones. Note that we grab
943 * the hash lock for the time we traverse the LRU list - this prevents someone
944 * from getting a kref from the dcache, which could cause us trouble (we rip
945 * someone off the list, who isn't unused, and they try to rip them off the
947 void dcache_prune(struct super_block *sb, bool negative_only)
949 struct dentry *d_i, *temp;
950 struct dentry_tailq victims = TAILQ_HEAD_INITIALIZER(victims);
952 spin_lock(&sb->s_dcache_lock);
953 spin_lock(&sb->s_lru_lock);
954 TAILQ_FOREACH_SAFE(d_i, &sb->s_lru_d, d_lru, temp) {
955 if (!(d_i->d_flags & DENTRY_USED)) {
956 if (negative_only && !(d_i->d_flags & DENTRY_NEGATIVE))
958 /* another place where we'd be better off with tools, not sol'ns */
959 hashtable_remove(sb->s_dcache, d_i);
960 TAILQ_REMOVE(&sb->s_lru_d, d_i, d_lru);
961 TAILQ_INSERT_HEAD(&victims, d_i, d_lru);
964 spin_unlock(&sb->s_lru_lock);
965 spin_unlock(&sb->s_dcache_lock);
966 /* Now do the actual freeing, outside of the hash/LRU list locks. This is
967 * necessary since __dentry_free() will decref its parent, which may get
968 * released and try to add itself to the LRU. */
969 TAILQ_FOREACH_SAFE(d_i, &victims, d_lru, temp) {
970 TAILQ_REMOVE(&victims, d_i, d_lru);
971 assert(!kref_refcnt(&d_i->d_kref));
974 /* It is possible at this point that there are new items on the LRU. We
975 * could loop back until that list is empty, if we care about this. */
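/* Hypothetical call sites (a sketch): a memory-pressure path might use
 *	dcache_prune(sb, FALSE);	// evict every unused dentry
 * while a lighter sweep could use
 *	dcache_prune(sb, TRUE);		// only drop the negative ones
 */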
978 /* Inode Functions */
980 /* Creates and initializes a new inode. Generic fields are filled in.
981 * FS-specific fields are filled in by the callout. Specific fields are filled
982 * in by read_inode() based on what's on the disk for a given i_ino, or when the
983 * inode is created (for new objects).
985 * i_ino is set by the caller. Note that this means this inode can be for an
986 * inode that is already on disk, or it can be used when creating. */
987 struct inode *get_inode(struct dentry *dentry)
989 struct super_block *sb = dentry->d_sb;
990 /* FS allocs and sets the following: i_op, i_fop, i_pm.pm_op, and any FS
992 struct inode *inode = sb->s_op->alloc_inode(sb);
997 TAILQ_INSERT_HEAD(&sb->s_inodes, inode, i_sb_list); /* weak inode ref */
998 TAILQ_INIT(&inode->i_dentry);
999 TAILQ_INSERT_TAIL(&inode->i_dentry, dentry, d_alias); /* weak dentry ref*/
1000 /* one for the dentry->d_inode, one passed out */
1001 kref_init(&inode->i_kref, inode_release, 2);
1002 dentry->d_inode = inode;
1003 inode->i_ino = 0; /* set by caller later */
1004 inode->i_blksize = sb->s_blocksize;
1005 spinlock_init(&inode->i_lock);
1006 kref_get(&sb->s_kref, 1); /* could allow the dentry to pin it */
1008 inode->i_rdev = 0; /* this has no real meaning yet */
1009 inode->i_bdev = sb->s_bdev; /* storing an uncounted ref */
1010 inode->i_state = 0; /* need real states, like I_NEW */
1011 inode->dirtied_when = 0;
1013 atomic_set(&inode->i_writecount, 0);
1014 /* Set up the page_map structures. Default is to use the embedded one.
1015 * Might push some of this back into specific FSs. For now, the FS tells us
1016 * what pm_op they want via i_pm.pm_op, which we set again in pm_init() */
1017 inode->i_mapping = &inode->i_pm;
1018 pm_init(inode->i_mapping, inode->i_pm.pm_op, inode);
1022 /* Helper: loads/reads in the inode numbered ino and attaches it to dentry */
1023 void load_inode(struct dentry *dentry, unsigned long ino)
1025 struct inode *inode;
1027 /* look it up in the inode cache first */
1028 inode = icache_get(dentry->d_sb, ino);
1030 /* connect the dentry to its inode */
1031 TAILQ_INSERT_TAIL(&inode->i_dentry, dentry, d_alias);
1032 dentry->d_inode = inode; /* storing the ref we got from icache_get */
1035 /* otherwise, we need to do it manually */
1036 inode = get_inode(dentry);
1038 dentry->d_sb->s_op->read_inode(inode);
1039 /* TODO: race here, two creators could miss in the cache, and then get here.
1040 * need a way to sync across a blocking call. needs to be either at this
1041 * point in the code or per the ino (dentries could be different) */
1042 icache_put(dentry->d_sb, inode);
1043 kref_put(&inode->i_kref);
1046 /* Helper op, used when creating regular files, directories, symlinks, etc.
1047 * Note we make a distinction between the mode and the file type (for now).
1048 * After calling this, call the FS specific version (create or mkdir), which
1049 * will set the i_ino, the filetype, and do any other FS-specific stuff. Also
1050 * note that a lot of inode stuff was initialized in get_inode/alloc_inode. The
1051 * stuff here is pertinent to the specific creator (user), mode, and time. Also
1052 * note we don't pass this an nd, like Linux does... */
1053 static struct inode *create_inode(struct dentry *dentry, int mode)
1055 /* note it is the i_ino that uniquely identifies a file in the specific
1056 * filesystem. there's a diff between creating an inode (even for an in-use
1057 * ino) and then filling it in, versus creating a brand new one.
1058 * get_inode() sets it to 0, and it should be filled in later in an
1059 * FS-specific manner. */
1060 struct inode *inode = get_inode(dentry);
1063 inode->i_mode = mode & S_PMASK; /* note that after this, we have no type */
1066 inode->i_blocks = 0;
1067 set_acmtime(inode, VFS_MTIME);
1068 inode->i_bdev = inode->i_sb->s_bdev;
1069 /* when we have notions of users, do something here: */
1075 /* Create a new disk inode in dir associated with dentry, with the given mode.
1076 * Called when creating a regular file. dir is the directory/parent. dentry is
1077 * the dentry of the inode we are creating. Note the lack of the nd... */
1078 int create_file(struct inode *dir, struct dentry *dentry, int mode)
1080 struct inode *new_file = create_inode(dentry, mode);
1083 dir->i_op->create(dir, dentry, mode, 0);
1084 set_acmtime(dir, VFS_MTIME);
1085 icache_put(new_file->i_sb, new_file);
1086 kref_put(&new_file->i_kref);
1090 /* Creates a new inode for a directory associated with dentry in dir with the
1092 int create_dir(struct inode *dir, struct dentry *dentry, int mode)
1094 struct inode *new_dir = create_inode(dentry, mode);
1097 dir->i_op->mkdir(dir, dentry, mode);
1098 dir->i_nlink++; /* Directories get a hardlink for every child dir */
1099 /* Make sure my parent tracks me. This is okay, since no directory (dir)
1100 * can have more than one dentry */
1101 struct dentry *parent = TAILQ_FIRST(&dir->i_dentry);
1102 assert(parent && parent == TAILQ_LAST(&dir->i_dentry, dentry_tailq));
1103 /* parent dentry tracks dentry as a subdir, weak reference */
1104 TAILQ_INSERT_TAIL(&parent->d_subdirs, dentry, d_subdirs_link);
1105 set_acmtime(dir, VFS_MTIME);
1106 icache_put(new_dir->i_sb, new_dir);
1107 kref_put(&new_dir->i_kref);
1111 /* Creates a new inode for a symlink associated with dentry in dir, containing
1112 * the symlink symname */
1113 int create_symlink(struct inode *dir, struct dentry *dentry,
1114 const char *symname, int mode)
1116 struct inode *new_sym = create_inode(dentry, mode);
1119 dir->i_op->symlink(dir, dentry, symname);
1120 set_acmtime(dir, VFS_MTIME);
1121 icache_put(new_sym->i_sb, new_sym);
1122 kref_put(&new_sym->i_kref);
1126 /* Returns 0 if the given mode is acceptable for the inode, and an appropriate
1127 * error code if not. Needs to be written, based on some sensible rules, and
1128 * will also probably use 'current' */
1129 int check_perms(struct inode *inode, int access_mode)
1131 return 0; /* anything goes! */
1134 /* Called after all external refs are gone to clean up the inode. Once this is
1135 * called, all dentries pointing here are already done (one of them triggered
1136 * this via kref_put()). */
1137 void inode_release(struct kref *kref)
1139 struct inode *inode = container_of(kref, struct inode, i_kref);
1140 TAILQ_REMOVE(&inode->i_sb->s_inodes, inode, i_sb_list);
1141 icache_remove(inode->i_sb, inode->i_ino);
1142 /* Might need to write back or delete the file/inode */
1143 if (inode->i_nlink) {
1144 if (inode->i_state & I_STATE_DIRTY)
1145 inode->i_sb->s_op->write_inode(inode, TRUE);
1147 inode->i_sb->s_op->delete_inode(inode);
1149 if (S_ISFIFO(inode->i_mode)) {
1150 page_decref(kva2page(inode->i_pipe->p_buf));
1151 kfree(inode->i_pipe);
1154 // kref_put(inode->i_bdev->kref); /* assuming it's a bdev, could be a pipe*/
1155 /* Either way, we dealloc the in-memory version */
1156 inode->i_sb->s_op->dealloc_inode(inode); /* FS-specific clean-up */
1157 kref_put(&inode->i_sb->s_kref);
1158 /* TODO: clean this up */
1159 assert(inode->i_mapping == &inode->i_pm);
1160 kmem_cache_free(inode_kcache, inode);
1163 /* Fills in kstat with the stat information for the inode */
1164 void stat_inode(struct inode *inode, struct kstat *kstat)
1166 kstat->st_dev = inode->i_sb->s_dev;
1167 kstat->st_ino = inode->i_ino;
1168 kstat->st_mode = inode->i_mode;
1169 kstat->st_nlink = inode->i_nlink;
1170 kstat->st_uid = inode->i_uid;
1171 kstat->st_gid = inode->i_gid;
1172 kstat->st_rdev = inode->i_rdev;
1173 kstat->st_size = inode->i_size;
1174 kstat->st_blksize = inode->i_blksize;
1175 kstat->st_blocks = inode->i_blocks;
1176 kstat->st_atim = inode->i_atime;
1177 kstat->st_mtim = inode->i_mtime;
1178 kstat->st_ctim = inode->i_ctime;
1181 void print_kstat(struct kstat *kstat)
1183 printk("kstat info for %p:\n", kstat);
1184 printk("\tst_dev : %p\n", kstat->st_dev);
1185 printk("\tst_ino : %p\n", kstat->st_ino);
1186 printk("\tst_mode : %p\n", kstat->st_mode);
1187 printk("\tst_nlink : %p\n", kstat->st_nlink);
1188 printk("\tst_uid : %p\n", kstat->st_uid);
1189 printk("\tst_gid : %p\n", kstat->st_gid);
1190 printk("\tst_rdev : %p\n", kstat->st_rdev);
1191 printk("\tst_size : %p\n", kstat->st_size);
1192 printk("\tst_blksize: %p\n", kstat->st_blksize);
1193 printk("\tst_blocks : %p\n", kstat->st_blocks);
1194 printk("\tst_atime : %p\n", kstat->st_atim);
1195 printk("\tst_mtime : %p\n", kstat->st_mtim);
1196 printk("\tst_ctime : %p\n", kstat->st_ctim);
1199 /* Inode Cache management. In general, search on the ino, get a refcnt'd value
1200 * back. Remove does not give you a reference back - it should only be called
1201 * in inode_release(). */
1202 struct inode *icache_get(struct super_block *sb, unsigned long ino)
1204 /* This is the same style as in pid2proc, it's the "safely create a strong
1205 * reference from a weak one, so long as other strong ones exist" pattern */
1206 spin_lock(&sb->s_icache_lock);
1207 struct inode *inode = hashtable_search(sb->s_icache, (void*)ino);
1209 if (!kref_get_not_zero(&inode->i_kref, 1))
1211 spin_unlock(&sb->s_icache_lock);
1215 void icache_put(struct super_block *sb, struct inode *inode)
1217 spin_lock(&sb->s_icache_lock);
1218 /* there's a race in load_inode() that could trigger this */
1219 assert(!hashtable_search(sb->s_icache, (void*)inode->i_ino));
1220 hashtable_insert(sb->s_icache, (void*)inode->i_ino, inode);
1221 spin_unlock(&sb->s_icache_lock);
1224 struct inode *icache_remove(struct super_block *sb, unsigned long ino)
1226 struct inode *inode;
1227 /* Presumably these hashtable removals could be easier since callers
1228 * actually know who they are (same with the pid2proc hash) */
1229 spin_lock(&sb->s_icache_lock);
1230 inode = hashtable_remove(sb->s_icache, (void*)ino);
1231 spin_unlock(&sb->s_icache_lock);
1232 assert(inode && !kref_refcnt(&inode->i_kref));
1236 /* File functions */
1238 /* Read count bytes from the file into buf, starting at *offset, which is
1239 * increased accordingly, returning the number of bytes transferred. Most
1240 * filesystems will use this function for their f_op->read.
1241 * Note, this uses the page cache. */
1242 ssize_t generic_file_read(struct file *file, char *buf, size_t count,
1248 unsigned long first_idx, last_idx;
1251 /* read in offset, in case of a concurrent reader/writer, so we don't screw
1252 * up our math for count, the idxs, etc. */
1253 off64_t orig_off = ACCESS_ONCE(*offset);
1255 /* Consider pushing some error checking higher in the VFS */
1258 if (!(file->f_flags & O_READ)) {
1262 if (orig_off >= file->f_dentry->d_inode->i_size)
1264 /* Make sure we don't go past the end of the file */
1265 if (orig_off + count > file->f_dentry->d_inode->i_size) {
1266 count = file->f_dentry->d_inode->i_size - orig_off;
1268 assert((long)count > 0);
1269 page_off = orig_off & (PGSIZE - 1);
1270 first_idx = orig_off >> PGSHIFT;
1271 last_idx = (orig_off + count) >> PGSHIFT;
1272 buf_end = buf + count;
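/* Worked example of the index math (assuming PGSIZE 4096, PGSHIFT 12): a read
 * of count 9000 at offset 5000 gives page_off = 5000 & 4095 = 904, first_idx =
 * 5000 >> 12 = 1, last_idx = 14000 >> 12 = 3, so pages 1 through 3 are loaded
 * and the first copy is MIN(4096 - 904, 9000) = 3192 bytes. */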
1273 /* For each file page, make sure it's in the page cache, then copy it out.
1274 * TODO: will probably need to consider concurrently truncated files here.*/
1275 for (int i = first_idx; i <= last_idx; i++) {
1276 error = pm_load_page(file->f_mapping, i, &page);
1277 assert(!error); /* TODO: handle ENOMEM and friends */
1278 copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
1279 /* TODO: (KFOP) Probably shouldn't do this. Either memcpy directly, or
1280 * split out the is_user_r(w)addr from copy_{to,from}_user() */
1281 if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
1282 memcpy_to_user(current, buf, page2kva(page) + page_off, copy_amt);
1284 memcpy(buf, page2kva(page) + page_off, copy_amt);
1287 pm_put_page(page); /* it's still in the cache, we just don't need it */
1289 assert(buf == buf_end);
1290 /* could have concurrent file ops that screw with offset, so userspace isn't
1291 * safe. but at least it'll be a value that one of the concurrent ops could
1292 * have produced (compared to *offset_changed_concurrently += count). */
1293 *offset = orig_off + count;
1294 set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
1298 /* Write count bytes from buf to the file, starting at *offset, which is
1299 * increased accordingly, returning the number of bytes transferred. Most
1300 * filesystems will use this function for their f_op->write. Note, this uses
1303 * Changes don't get flushed to disc til there is an fsync, page cache eviction,
1304 * or other means of trying to writeback the pages. */
1305 ssize_t generic_file_write(struct file *file, const char *buf, size_t count,
1311 unsigned long first_idx, last_idx;
1313 const char *buf_end;
1314 off64_t orig_off = ACCESS_ONCE(*offset);
1316 /* Consider pushing some error checking higher in the VFS */
1319 if (!(file->f_flags & O_WRITE)) {
1323 if (file->f_flags & O_APPEND) {
1324 spin_lock(&file->f_dentry->d_inode->i_lock);
1325 orig_off = file->f_dentry->d_inode->i_size;
1326 /* setting the filesize here, instead of during the extend-check, since
1327 * we need to atomically reserve space and set our write position. */
1328 file->f_dentry->d_inode->i_size += count;
1329 spin_unlock(&file->f_dentry->d_inode->i_lock);
1331 if (orig_off + count > file->f_dentry->d_inode->i_size) {
1332 /* lock for writes to i_size. we allow lockless reads. recheck
1333 * i_size in case of concurrent writers since our orig check. */
1334 spin_lock(&file->f_dentry->d_inode->i_lock);
1335 if (orig_off + count > file->f_dentry->d_inode->i_size)
1336 file->f_dentry->d_inode->i_size = orig_off + count;
1337 spin_unlock(&file->f_dentry->d_inode->i_lock);
1340 page_off = orig_off & (PGSIZE - 1);
1341 first_idx = orig_off >> PGSHIFT;
1342 last_idx = (orig_off + count) >> PGSHIFT;
1343 buf_end = buf + count;
1344 /* For each file page, make sure it's in the page cache, then write it. */
1345 for (int i = first_idx; i <= last_idx; i++) {
1346 error = pm_load_page(file->f_mapping, i, &page);
1347 assert(!error); /* TODO: handle ENOMEM and friends */
1348 copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
1349 /* TODO: (UMEM) (KFOP) think about this. */
1350 if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
1351 memcpy_from_user(current, page2kva(page) + page_off, buf, copy_amt);
1353 memcpy(page2kva(page) + page_off, buf, copy_amt);
1356 atomic_or(&page->pg_flags, PG_DIRTY);
1357 pm_put_page(page); /* it's still in the cache, we just don't need it */
1359 assert(buf == buf_end);
1360 *offset = orig_off + count;
1361 set_acmtime(file->f_dentry->d_inode, VFS_MTIME);
1365 /* Directories usually use this for their read method, which is the way glibc
1366 * currently expects us to do a readdir (short of doing linux's getdents). Will
1367 * probably need work, based on whatever real programs want. */
1368 ssize_t generic_dir_read(struct file *file, char *u_buf, size_t count,
1371 struct kdirent dir_r = {0}, *dirent = &dir_r;
1373 size_t amt_copied = 0;
1374 char *buf_end = u_buf + count;
1376 if (!S_ISDIR(file->f_dentry->d_inode->i_mode)) {
1382 if (!(file->f_flags & O_READ)) {
1386 /* start readdir from where it left off: */
1387 dirent->d_off = *offset;
1389 u_buf + sizeof(struct kdirent) <= buf_end;
1390 u_buf += sizeof(struct kdirent)) {
1391 /* TODO: UMEM/KFOP (pin the u_buf in the syscall, ditch the local copy,
1392 * get rid of this memcpy and reliance on current, etc). Might be
1393 * tricky with the dirent->d_off and trust issues */
1394 retval = file->f_op->readdir(file, dirent);
1399 /* Slight info exposure: could be extra crap after the name in the
1400 * dirent (like the name of a deleted file) */
1401 if (!is_ktask(per_cpu_info[core_id()].cur_kthread))
1402 memcpy_to_user(current, u_buf, dirent, sizeof(struct dirent));
1404 memcpy(u_buf, dirent, sizeof(struct dirent));
1405 amt_copied += sizeof(struct dirent);
1406 /* 0 signals end of directory */
1410 /* Next time read is called, we pick up where we left off */
1411 *offset = dirent->d_off; /* UMEM */
1412 /* important to tell them how much they got. they often keep going til they
1413 * get 0 back (in the case of ls). It's also how much has been read, but it
1414 * isn't how much the f_pos has moved (which is opaque to the VFS). */
1415 set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
1419 /* Opens the file, using permissions from current for lack of a better option.
1420 * It will attempt to create the file if it does not exist and O_CREAT is
1421 * specified. This will return 0 on failure, and set errno. TODO: There's some
1422 * stuff that we don't do, esp related to file truncation/creation. flags are for
1423 * opening, the mode is for creating. The flags related to how to create
1424 * (O_CREAT_FLAGS) are handled in this function, not in create_file().
1426 * It's tempting to split this into a do_file_create and a do_file_open, based
1427 * on the O_CREAT flag, but the O_CREAT flag can be ignored if the file exists
1428 * already and O_EXCL isn't specified. We could have open call create if it
1429 * fails, but for now we'll keep it as is. */
1430 struct file *do_file_open(char *path, int flags, int mode)
1432 struct file *file = 0;
1433 struct dentry *file_d;
1434 struct inode *parent_i;
1435 struct nameidata nd_r = {0}, *nd = &nd_r;
1437 unsigned long nr_pages;
1439 /* The file might exist, lets try to just open it right away */
1440 nd->intent = LOOKUP_OPEN;
1441 error = path_lookup(path, LOOKUP_FOLLOW, nd);
1443 if (S_ISDIR(nd->dentry->d_inode->i_mode) && (flags & O_WRITE)) {
1447 /* Also need to make sure we didn't want to O_EXCL create */
1448 if ((flags & O_CREAT) && (flags & O_EXCL)) {
1452 file_d = nd->dentry;
1453 kref_get(&file_d->d_kref, 1);
1456 if (!(flags & O_CREAT)) {
1460 /* So it didn't already exist, release the path from the previous lookup,
1461 * and then we try to create it. */
1463 /* get the parent, following links. this means you get the parent of the
1464 * final link (which may not be in 'path' in the first place). */
1465 nd->intent = LOOKUP_CREATE;
1466 error = path_lookup(path, LOOKUP_PARENT | LOOKUP_FOLLOW, nd);
1471 /* see if the target is there (shouldn't be), and handle accordingly */
1472 file_d = do_lookup(nd->dentry, nd->last.name);
1474 if (!(flags & O_CREAT)) {
1475 warn("Extremely unlikely race, probably a bug");
1479 /* Create the inode/file. get a fresh dentry too: */
1480 file_d = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1483 parent_i = nd->dentry->d_inode;
1484 /* Note that the mode technically should only apply to future opens,
1485 * but we apply it immediately. */
1486 if (create_file(parent_i, file_d, mode)) /* sets errno */
1488 dcache_put(file_d->d_sb, file_d);
1489 } else { /* something already exists */
1490 /* this can happen due to concurrent access, but needs to be thought
1492 panic("File shouldn't be here!");
1493 if ((flags & O_CREAT) && (flags & O_EXCL)) {
1494 /* wanted to create, not open, bail out */
1500 /* now open the file (freshly created or if it already existed). At this
1501 * point, file_d is a refcnt'd dentry, regardless of which branch we took. */
1502 if (flags & O_TRUNC) {
1503 spin_lock(&file_d->d_inode->i_lock);
1504 nr_pages = ROUNDUP(file_d->d_inode->i_size, PGSIZE) >> PGSHIFT;
1505 file_d->d_inode->i_size = 0;
1506 spin_unlock(&file_d->d_inode->i_lock);
1507 pm_remove_contig(file_d->d_inode->i_mapping, 0, nr_pages);
1509 file = dentry_open(file_d, flags); /* sets errno */
1510 /* Note the fall through to the exit paths. File is 0 by default and stays 0
1511 * if dentry_open fails. */
1513 kref_put(&file_d->d_kref);
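/* Hypothetical caller (a sketch): create-or-truncate a file for writing.  The
 * path and mode are stand-ins, and f_kref is assumed to be the file's kref. */
#if 0
	struct file *f = do_file_open("/tmp/example", O_WRITE | O_CREAT | O_TRUNC,
	                              S_IRUSR | S_IWUSR);

	if (!f)
		return -1;	/* errno was set by do_file_open */
	/* ... read/write via f->f_op ... */
	kref_put(&f->f_kref);	/* assumed field name for the file's kref */
#endif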
1519 /* Path is the location of the symlink, sometimes called the "new path", and
1520 * symname is who we link to, sometimes called the "old path". */
1521 int do_symlink(char *path, const char *symname, int mode)
1523 struct dentry *sym_d;
1524 struct inode *parent_i;
1525 struct nameidata nd_r = {0}, *nd = &nd_r;
1529 nd->intent = LOOKUP_CREATE;
1530 /* get the parent, but don't follow links */
1531 error = path_lookup(path, LOOKUP_PARENT, nd);
1536 /* see if the target is already there, handle accordingly */
1537 sym_d = do_lookup(nd->dentry, nd->last.name);
1542 /* Doesn't already exist, let's try to make it: */
1543 sym_d = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1546 parent_i = nd->dentry->d_inode;
1547 if (create_symlink(parent_i, sym_d, symname, mode))
1549 set_acmtime(parent_i, VFS_MTIME);
1550 dcache_put(sym_d->d_sb, sym_d);
1551 retval = 0; /* Note the fall through to the exit paths */
1553 kref_put(&sym_d->d_kref);
1559 /* Makes a hard link for the file behind old_path to new_path */
1560 int do_link(char *old_path, char *new_path)
1562 struct dentry *link_d, *old_d;
1563 struct inode *inode, *parent_dir;
1564 struct nameidata nd_r = {0}, *nd = &nd_r;
1568 nd->intent = LOOKUP_CREATE;
1569 /* get the absolute parent of the new_path */
1570 error = path_lookup(new_path, LOOKUP_PARENT | LOOKUP_FOLLOW, nd);
1575 parent_dir = nd->dentry->d_inode;
1576 /* see if the new target is already there, handle accordingly */
1577 link_d = do_lookup(nd->dentry, nd->last.name);
1582 /* Doesn't already exist, let's try to make it. Still need to stitch it to
1583 * an inode and set its FS-specific stuff after this.*/
1584 link_d = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1587 /* Now let's get the old_path target */
1588 old_d = lookup_dentry(old_path, LOOKUP_FOLLOW);
1589 if (!old_d) /* errno set by lookup_dentry */
1591 /* For now, can only link to files */
1592 if (!S_ISREG(old_d->d_inode->i_mode)) {
1596 /* Must be on the same FS */
1597 if (old_d->d_sb != link_d->d_sb) {
1601 /* Do whatever FS specific stuff there is first (which is also a chance to
1603 error = parent_dir->i_op->link(old_d, parent_dir, link_d);
1608 set_acmtime(parent_dir, VFS_MTIME);
1609 /* Finally stitch it up */
1610 inode = old_d->d_inode;
1611 kref_get(&inode->i_kref, 1);
1612 link_d->d_inode = inode;
1614 TAILQ_INSERT_TAIL(&inode->i_dentry, link_d, d_alias); /* weak ref */
1615 dcache_put(link_d->d_sb, link_d);
1616 retval = 0; /* Note the fall through to the exit paths */
1618 kref_put(&old_d->d_kref);
1620 kref_put(&link_d->d_kref);
1626 /* Unlinks path from the directory tree. Read the Documentation for more info.
1628 int do_unlink(char *path)
1630 struct dentry *dentry;
1631 struct inode *parent_dir;
1632 struct nameidata nd_r = {0}, *nd = &nd_r;
1636 /* get the parent of the target, and don't follow a final link */
1637 error = path_lookup(path, LOOKUP_PARENT, nd);
1642 parent_dir = nd->dentry->d_inode;
1643 /* make sure the target is there */
1644 dentry = do_lookup(nd->dentry, nd->last.name);
1649 /* Make sure the target is not a directory */
1650 if (S_ISDIR(dentry->d_inode->i_mode)) {
1654 /* Remove the dentry from its parent */
1655 error = parent_dir->i_op->unlink(parent_dir, dentry);
1660 set_acmtime(parent_dir, VFS_MTIME);
1661 /* Now that our parent doesn't track us, we need to make sure we aren't
1662 * findable via the dentry cache. DYING, so we will be freed in
1663 * dentry_release() */
1664 dentry->d_flags |= DENTRY_DYING;
1665 dcache_remove(dentry->d_sb, dentry);
1666 dentry->d_inode->i_nlink--; /* TODO: race here, esp with a decref */
1667 /* At this point, the dentry is unlinked from the FS, and the inode has one
1668 * less link. When the in-memory objects (dentry, inode) are going to be
1669 * released (after all open files are closed, and maybe after entries are
1670 * evicted from the cache), then nlinks will get checked and the FS-file
1671 * will get removed from the disk */
1672 retval = 0; /* Note the fall through to the exit paths */
1674 kref_put(&dentry->d_kref);
1680 /* Checks to see if path can be accessed via mode. Need to actually send the
1681 * mode along somehow, so this doesn't do much now. This is an example of
1682 * decent error propagation from the lower levels via int retvals. */
1683 int do_access(char *path, int mode)
1685 struct nameidata nd_r = {0}, *nd = &nd_r;
1687 nd->intent = LOOKUP_ACCESS;
1688 retval = path_lookup(path, 0, nd);
1693 int do_file_chmod(struct file *file, int mode)
1695 int old_mode_ftype = file->f_dentry->d_inode->i_mode & __S_IFMT;
1697 /* TODO: when we have notions of uid, check for the proc's uid */
1698 if (file->f_dentry->d_inode->i_uid != UID_OF_ME)
1702 file->f_dentry->d_inode->i_mode = (mode & S_PMASK) | old_mode_ftype;
1703 set_acmtime(file->f_dentry->d_inode, VFS_CTIME);
1707 /* Make a directory at path with mode. Returns -1 and sets errno on errors */
1708 int do_mkdir(char *path, int mode)
1710 struct dentry *dentry;
1711 struct inode *parent_i;
1712 struct nameidata nd_r = {0}, *nd = &nd_r;
1716 /* The dir might exist and might be /, so we can't look for the parent */
1717 nd->intent = LOOKUP_OPEN;
1718 error = path_lookup(path, LOOKUP_FOLLOW, nd);
1724 nd->intent = LOOKUP_CREATE;
1725 /* get the parent, but don't follow links */
1726 error = path_lookup(path, LOOKUP_PARENT, nd);
1731 /* Doesn't already exist, let's try to make it: */
1732 dentry = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1735 parent_i = nd->dentry->d_inode;
1736 if (create_dir(parent_i, dentry, mode))
1738 set_acmtime(parent_i, VFS_MTIME);
1739 dcache_put(dentry->d_sb, dentry);
1740 retval = 0; /* Note the fall through to the exit paths */
1742 kref_put(&dentry->d_kref);
1748 int do_rmdir(char *path)
1750 struct dentry *dentry;
1751 struct inode *parent_i;
1752 struct nameidata nd_r = {0}, *nd = &nd_r;
1756 /* get the parent, following links (probably want this), and we must get a
1757 * directory. Note, current versions of path_lookup can't handle both
1758 * PARENT and DIRECTORY, at least, it doesn't check that *path is a
1759 * directory. */
1760 error = path_lookup(path, LOOKUP_PARENT | LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
1766 /* make sure the target is already there, handle accordingly */
1767 dentry = do_lookup(nd->dentry, nd->last.name);
1772 if (!S_ISDIR(dentry->d_inode->i_mode)) {
1776 if (dentry->d_mount_point) {
1780 /* TODO: make sure we aren't a mount or processes root (EBUSY) */
1781 /* Now for the removal. the FSs will check if they are empty */
1782 parent_i = nd->dentry->d_inode;
1783 error = parent_i->i_op->rmdir(parent_i, dentry);
1788 set_acmtime(parent_i, VFS_MTIME);
1789 /* Now that our parent doesn't track us, we need to make sure we aren't
1790 * findable via the dentry cache. DYING, so we will be freed in
1791 * dentry_release() */
1792 dentry->d_flags |= DENTRY_DYING;
1793 dcache_remove(dentry->d_sb, dentry);
1794 /* Decref ourselves, so inode_release() knows we are done */
1795 dentry->d_inode->i_nlink--;
1796 TAILQ_REMOVE(&nd->dentry->d_subdirs, dentry, d_subdirs_link);
1797 parent_i->i_nlink--; /* TODO: race on this, esp since its a decref */
1798 /* we still have d_parent and a kref on our parent, which will go away when
1799 * the in-memory dentry object goes away. */
1800 retval = 0; /* Note the fall through to the exit paths */
1802 kref_put(&dentry->d_kref);
1808 /* Pipes: Doing a simple buffer with reader and writer offsets. Size is a power
1809 * of two, so we can easily compute its status and whatnot. */
1811 #define PIPE_SZ (1 << PGSHIFT)
1813 static size_t pipe_get_rd_idx(struct pipe_inode_info *pii)
1815 return pii->p_rd_off & (PIPE_SZ - 1);
1818 static size_t pipe_get_wr_idx(struct pipe_inode_info *pii)
1821 return pii->p_wr_off & (PIPE_SZ - 1);
1824 static bool pipe_is_empty(struct pipe_inode_info *pii)
1826 return __ring_empty(pii->p_wr_off, pii->p_rd_off);
1829 static bool pipe_is_full(struct pipe_inode_info *pii)
1831 return __ring_full(PIPE_SZ, pii->p_wr_off, pii->p_rd_off);
1834 static size_t pipe_nr_full(struct pipe_inode_info *pii)
1836 return __ring_nr_full(pii->p_wr_off, pii->p_rd_off);
1839 static size_t pipe_nr_empty(struct pipe_inode_info *pii)
1841 return __ring_nr_empty(PIPE_SZ, pii->p_wr_off, pii->p_rd_off);
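/* Standalone sketch (example-local names, not the kernel's __ring_* macros)
 * of the arithmetic the helpers above rely on: offsets run freely and only
 * get masked down to an index, so "full" and "empty" stay distinguishable
 * even though both leave the masked indices equal.  Requires a power-of-two
 * size. */
#include <assert.h>
#include <stddef.h>

#define EX_RING_SZ 16				/* must be a power of two */

static size_t ex_ring_idx(size_t off)
{
	return off & (EX_RING_SZ - 1);
}

static void ring_math_demo(void)
{
	size_t rd = 0, wr = 0;

	wr += EX_RING_SZ;			/* produce EX_RING_SZ items */
	assert(wr - rd == EX_RING_SZ);		/* full: nr_full == size */
	assert(ex_ring_idx(wr) == ex_ring_idx(rd));	/* ...yet same index */
	rd += 4;				/* consume four items */
	assert(wr - rd == EX_RING_SZ - 4);	/* nr_full drops accordingly */
	assert(wr - rd != 0);			/* not empty until wr == rd */
}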
1844 ssize_t pipe_file_read(struct file *file, char *buf, size_t count,
1847 struct pipe_inode_info *pii = file->f_dentry->d_inode->i_pipe;
1848 size_t copy_amt, amt_copied = 0;
1850 cv_lock(&pii->p_cv);
1851 while (pipe_is_empty(pii)) {
1852 /* We wait til the pipe is drained before sending EOF if there are no
1853 * writers (instead of aborting immediately) */
1854 if (!pii->p_nr_writers) {
1855 cv_unlock(&pii->p_cv);
1858 if (file->f_flags & O_NONBLOCK) {
1859 cv_unlock(&pii->p_cv);
1863 cv_wait(&pii->p_cv);
1866 /* We might need to wrap-around with our copy, so we'll do the copy in two
1867 * passes. This will copy up to the end of the buffer, then on the next
1868 * pass will copy the rest to the beginning of the buffer (if necessary) */
1869 for (int i = 0; i < 2; i++) {
1870 copy_amt = MIN(PIPE_SZ - pipe_get_rd_idx(pii),
1871 MIN(pipe_nr_full(pii), count));
1872 assert(current); /* shouldn't pipe from the kernel */
1873 memcpy_to_user(current, buf, pii->p_buf + pipe_get_rd_idx(pii),
1877 pii->p_rd_off += copy_amt;
1878 amt_copied += copy_amt;
1880 /* Just using one CV for both readers and writers. We should rarely have
1881 * multiple readers or writers. */
1883 __cv_broadcast(&pii->p_cv);
1884 cv_unlock(&pii->p_cv);
1885 set_acmtime(file->f_dentry->d_inode, VFS_ATIME);
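/* Hedged, standalone version of the two-pass wrap-around copy used above,
 * with plain memcpy instead of memcpy_to_user and example-local names.  The
 * first pass copies up to the physical end of the ring; the second pass
 * (possibly zero bytes) continues from the start of the ring. */
#include <string.h>
#include <stddef.h>

#define EX_PIPE_SZ 8				/* power of two, like PIPE_SZ */
#define EX_MIN(a, b) ((a) < (b) ? (a) : (b))

static size_t ex_ring_copy_out(char *dst, size_t count, const char *ring,
                               size_t *rd_off, size_t wr_off)
{
	size_t copied = 0;

	for (int i = 0; i < 2; i++) {
		size_t rd_idx = *rd_off & (EX_PIPE_SZ - 1);
		size_t nr_full = wr_off - *rd_off;
		size_t chunk = EX_MIN(EX_PIPE_SZ - rd_idx,
		                      EX_MIN(nr_full, count - copied));

		memcpy(dst + copied, ring + rd_idx, chunk);
		*rd_off += chunk;
		copied += chunk;
	}
	return copied;
}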
1889 /* Note: we're not dealing with PIPE_BUF and minimum atomic chunks, unless I
1890 * have to later. */
1891 ssize_t pipe_file_write(struct file *file, const char *buf, size_t count,
1894 struct pipe_inode_info *pii = file->f_dentry->d_inode->i_pipe;
1895 size_t copy_amt, amt_copied = 0;
1897 cv_lock(&pii->p_cv);
1898 /* Write aborts right away if there are no readers, regardless of pipe
1899 * status. */
1900 if (!pii->p_nr_readers) {
1901 cv_unlock(&pii->p_cv);
1905 while (pipe_is_full(pii)) {
1906 if (file->f_flags & O_NONBLOCK) {
1907 cv_unlock(&pii->p_cv);
1911 cv_wait(&pii->p_cv);
1913 /* Still need to check in the loop, in case the last reader left while
1914 * we slept. */
1915 if (!pii->p_nr_readers) {
1916 cv_unlock(&pii->p_cv);
1921 /* We might need to wrap-around with our copy, so we'll do the copy in two
1922 * passes. This will copy up to the end of the buffer, then on the next
1923 * pass will copy the rest to the beginning of the buffer (if necessary) */
1924 for (int i = 0; i < 2; i++) {
1925 copy_amt = MIN(PIPE_SZ - pipe_get_wr_idx(pii),
1926 MIN(pipe_nr_empty(pii), count));
1927 assert(current); /* shouldn't pipe from the kernel */
1928 memcpy_from_user(current, pii->p_buf + pipe_get_wr_idx(pii), buf,
1932 pii->p_wr_off += copy_amt;
1933 amt_copied += copy_amt;
1935 /* Just using one CV for both readers and writers. We should rarely have
1936 * multiple readers or writers. */
1938 __cv_broadcast(&pii->p_cv);
1939 cv_unlock(&pii->p_cv);
1940 set_acmtime(file->f_dentry->d_inode, VFS_MTIME);
1944 /* In open and release, we need to track the number of readers and writers,
1945 * which we can differentiate by the file flags. */
1946 int pipe_open(struct inode *inode, struct file *file)
1948 struct pipe_inode_info *pii = inode->i_pipe;
1949 cv_lock(&pii->p_cv);
1950 /* Ugliness due to not using flags for O_RDONLY and friends... */
1951 if (file->f_mode == S_IRUSR) {
1952 pii->p_nr_readers++;
1953 } else if (file->f_mode == S_IWUSR) {
1954 pii->p_nr_writers++;
1956 warn("Bad pipe file flags 0x%x\n", file->f_flags);
1958 cv_unlock(&pii->p_cv);
1962 int pipe_release(struct inode *inode, struct file *file)
1964 struct pipe_inode_info *pii = inode->i_pipe;
1965 cv_lock(&pii->p_cv);
1966 /* Ugliness due to not using flags for O_RDONLY and friends... */
1967 if (file->f_mode == S_IRUSR) {
1968 pii->p_nr_readers--;
1969 } else if (file->f_mode == S_IWUSR) {
1970 pii->p_nr_writers--;
1972 warn("Bad pipe file flags 0x%x\n", file->f_flags);
1974 /* need to wake up any sleeping readers/writers, since we might be done */
1975 __cv_broadcast(&pii->p_cv);
1976 cv_unlock(&pii->p_cv);
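/* Userspace illustration (standard POSIX, not kernel code) of the accounting
 * above: closing the last write end is what lets a reader see EOF, and the
 * pipe is drained before EOF is reported, matching the read-side loop in
 * pipe_file_read(). */
#include <unistd.h>

static void pipe_eof_demo(void)
{
	int fds[2];
	char c;

	if (pipe(fds))
		return;
	write(fds[1], "x", 1);
	close(fds[1]);			/* last writer gone */
	read(fds[0], &c, 1);		/* returns 1: buffered data first */
	read(fds[0], &c, 1);		/* returns 0: EOF, no writers left */
	close(fds[0]);
}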
1980 struct file_operations pipe_f_op = {
1981 .read = pipe_file_read,
1982 .write = pipe_file_write,
1984 .release = pipe_release,
1988 void pipe_debug(struct file *f)
1990 struct pipe_inode_info *pii = f->f_dentry->d_inode->i_pipe;
1992 printk("PIPE %p\n", pii);
1993 printk("\trdoff %p\n", pii->p_rd_off);
1994 printk("\twroff %p\n", pii->p_wr_off);
1995 printk("\tnr_rds %d\n", pii->p_nr_readers);
1996 printk("\tnr_wrs %d\n", pii->p_nr_writers);
1997 printk("\tcv waiters %d\n", pii->p_cv.nr_waiters);
2001 /* General plan: get a dentry/inode to represent the pipe. We'll alloc it from
2002 * the default_ns SB, but won't actually link it anywhere. It'll only be held
2003 * alive by the krefs, til all the FDs are closed. */
2004 int do_pipe(struct file **pipe_files, int flags)
2006 struct dentry *pipe_d;
2007 struct inode *pipe_i;
2008 struct file *pipe_f_read, *pipe_f_write;
2009 struct super_block *def_sb = default_ns.root->mnt_sb;
2010 struct pipe_inode_info *pii;
2012 pipe_d = get_dentry(def_sb, 0, "pipe");
2015 pipe_d->d_op = &dummy_d_op;
2016 pipe_i = get_inode(pipe_d);
2018 goto error_post_dentry;
2019 /* preemptively mark the dentry for deletion. we have an unlinked dentry
2020 * right off the bat, held in only by the kref chain (pipe_d is the ref). */
2021 pipe_d->d_flags |= DENTRY_DYING;
2022 /* pipe_d->d_inode still has one ref to pipe_i, keeping the inode alive */
2023 kref_put(&pipe_i->i_kref);
2024 /* init inode fields. note we're using the dummy ops for i_op and d_op */
2025 pipe_i->i_mode = S_IRWXU | S_IRWXG | S_IRWXO;
2026 SET_FTYPE(pipe_i->i_mode, __S_IFIFO); /* using type == FIFO */
2027 pipe_i->i_nlink = 1; /* one for the dentry */
2030 pipe_i->i_size = PGSIZE;
2031 pipe_i->i_blocks = 0;
2032 pipe_i->i_atime.tv_sec = 0;
2033 pipe_i->i_atime.tv_nsec = 0;
2034 pipe_i->i_mtime.tv_sec = 0;
2035 pipe_i->i_mtime.tv_nsec = 0;
2036 pipe_i->i_ctime.tv_sec = 0;
2037 pipe_i->i_ctime.tv_nsec = 0;
2038 pipe_i->i_fs_info = 0;
2039 pipe_i->i_op = &dummy_i_op;
2040 pipe_i->i_fop = &pipe_f_op;
2041 pipe_i->i_socket = FALSE;
2042 /* Actually build the pipe. We're using one page, hanging off the
2043 * pipe_inode_info struct. When we release the inode, we free the pipe
2044 * memory too. */
2045 pipe_i->i_pipe = kmalloc(sizeof(struct pipe_inode_info), MEM_WAIT);
2046 pii = pipe_i->i_pipe;
2051 pii->p_buf = kpage_zalloc_addr();
2058 pii->p_nr_readers = 0;
2059 pii->p_nr_writers = 0;
2060 cv_init(&pii->p_cv); /* must do this before dentry_open / pipe_open */
2061 /* Now we have an inode for the pipe. We need two files for the read and
2062 * write ends of the pipe. */
2063 flags &= ~(O_ACCMODE); /* avoid user bugs */
2064 pipe_f_read = dentry_open(pipe_d, flags | O_RDONLY);
2067 pipe_f_write = dentry_open(pipe_d, flags | O_WRONLY);
2070 pipe_files[0] = pipe_f_read;
2071 pipe_files[1] = pipe_f_write;
2075 kref_put(&pipe_f_read->f_kref);
2077 page_decref(kva2page(pii->p_buf));
2079 kfree(pipe_i->i_pipe);
2081 /* We don't need to free the pipe_i; putting the dentry will free it */
2083 /* Note we only free the dentry on failure. */
2084 kref_put(&pipe_d->d_kref);
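/* Hedged sketch of a sys_pipe()-style caller for do_pipe(), assuming the
 * calling process's FD table is reachable as p->open_files (an assumption
 * about struct proc, not taken from this file).  Error unwinding and the
 * copy-out of the FDs to userspace are omitted. */
static int example_sys_pipe(struct proc *p, int *fd_out, int flags)
{
	struct file *pipe_files[2];

	if (do_pipe(pipe_files, flags))
		return -1;			/* errno set by do_pipe */
	fd_out[0] = insert_file(&p->open_files, pipe_files[0], 0, FALSE, FALSE);
	fd_out[1] = insert_file(&p->open_files, pipe_files[1], 0, FALSE, FALSE);
	/* insert_file krefs the files for the FD table; drop the refs that
	 * do_pipe handed back to us */
	kref_put(&pipe_files[0]->f_kref);
	kref_put(&pipe_files[1]->f_kref);
	return (fd_out[0] < 0 || fd_out[1] < 0) ? -1 : 0;
}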
2088 int do_rename(char *old_path, char *new_path)
2090 struct nameidata nd_old = {0}, *nd_o = &nd_old;
2091 struct nameidata nd_new = {0}, *nd_n = &nd_new;
2092 struct dentry *old_dir_d, *new_dir_d;
2093 struct inode *old_dir_i, *new_dir_i;
2094 struct dentry *old_d, *new_d, *unlink_d;
2098 nd_o->intent = LOOKUP_ACCESS; /* maybe, might need another type */
2100 /* get the parent, but don't follow links */
2101 error = path_lookup(old_path, LOOKUP_PARENT | LOOKUP_DIRECTORY, nd_o);
2107 old_dir_d = nd_o->dentry;
2108 old_dir_i = old_dir_d->d_inode;
2110 old_d = do_lookup(old_dir_d, nd_o->last.name);
2117 nd_n->intent = LOOKUP_CREATE;
2118 error = path_lookup(new_path, LOOKUP_PARENT | LOOKUP_DIRECTORY, nd_n);
2122 goto out_paths_and_src;
2124 new_dir_d = nd_n->dentry;
2125 new_dir_i = new_dir_d->d_inode;
2126 /* TODO if new_dir == old_dir, we might be able to simplify things */
2128 if (new_dir_i->i_sb != old_dir_i->i_sb) {
2131 goto out_paths_and_src;
2133 /* TODO: check_perms is lousy, want to just say "writable" here */
2134 if (check_perms(old_dir_i, S_IWUSR) || check_perms(new_dir_i, S_IWUSR)) {
2137 goto out_paths_and_src;
2139 /* TODO: if we're doing a rename that moves a directory, we need to make
2140 * sure the new_path doesn't include the old_path. It's not as simple as
2141 * just checking, since there could be a concurrent rename that breaks the
2142 * check later. e.g. what if new_dir's parent is being moved into a child
2143 * of old_dir?
2145 * linux has a per-fs rename mutex for these scenarios, so only one can
2146 * proceed at a time. i don't see another way to deal with it either.
2147 * maybe something like flagging all dentries on the new_path with "do not
2148 * rename". */
2150 /* TODO: this is all very racy. right after we do a new_d lookup, someone
2151 * else could create or unlink new_d. need to lock here, or else push this
2152 * down into the FS.
2154 * For any locking scheme, we probably need to lock both the old and new
2155 * dirs. To prevent deadlock, we need a total ordering of all inodes (or
2156 * dentries, if we lock them instead). inode number or struct inode*
2157 * will work for this. */
2158 new_d = do_lookup(new_dir_d, nd_n->last.name);
2160 if (new_d->d_inode == old_d->d_inode)
2161 goto out_paths_and_refs; /* rename does nothing */
2162 /* TODO: Here's a bunch of other racy checks we need to do, maybe in the
2163 * future:
2165 * if src is a dir, dst must be an empty dir if it exists (RACYx2)
2166 * racing on dst being created and it getting new entries
2167 * if src is a file, dst must be a file if it exists (RACY)
2168 * racing on dst being created and still being a file
2169 * racing on dst being unlinked and a new one being added
2171 /* TODO: we should allow empty dirs */
2172 if (S_ISDIR(new_d->d_inode->i_mode)) {
2175 goto out_paths_and_refs;
2177 /* TODO: need this to be atomic with rename */
2178 error = new_dir_i->i_op->unlink(new_dir_i, new_d);
2182 goto out_paths_and_refs;
2184 new_d->d_flags |= DENTRY_DYING;
2185 /* TODO: racy with other lookups on new_d */
2186 dcache_remove(new_d->d_sb, new_d);
2187 new_d->d_inode->i_nlink--; /* TODO: race here, esp with a decref */
2188 kref_put(&new_d->d_kref);
2190 /* new_d is just a vessel for the name. somewhat lousy. */
2191 new_d = get_dentry(new_dir_d->d_sb, new_dir_d, nd_n->last.name);
2193 /* TODO: more races. need to remove old_d from the dcache, since we're
2194 * about to change its parentage. could be readded concurrently. */
2195 dcache_remove(old_dir_d->d_sb, old_d);
2196 error = new_dir_i->i_op->rename(old_dir_i, old_d, new_dir_i, new_d);
2198 /* TODO: oh crap, we already unlinked! now we're screwed, and violated
2199 * our atomicity requirements. */
2200 printk("[kernel] rename failed, you might have lost data\n");
2203 goto out_paths_and_refs;
2206 /* old_dir loses old_d, new_dir gains old_d, renamed to new_d. this is
2207 * particularly cumbersome since there are two levels here: the FS has its
2208 * info about where things are, and the VFS has its dentry tree. and it's
2209 * all racy (TODO). */
2210 dentry_set_name(old_d, new_d->d_name.name);
2211 old_d->d_parent = new_d->d_parent;
2212 if (S_ISDIR(old_d->d_inode->i_mode)) {
2213 TAILQ_REMOVE(&old_dir_d->d_subdirs, old_d, d_subdirs_link);
2214 old_dir_i->i_nlink--; /* TODO: racy, etc */
2215 TAILQ_INSERT_TAIL(&new_dir_d->d_subdirs, old_d, d_subdirs_link);
2216 new_dir_i->i_nlink++; /* TODO: racy, etc */
2219 /* and then the third level: dcache stuff. we could have old versions of
2220 * old_d or negative versions of new_d sitting around. dcache_put should
2221 * replace a potentially negative dentry for new_d (now called old_d) */
2222 dcache_put(old_dir_d->d_sb, old_d);
2224 set_acmtime(old_dir_i, VFS_MTIME);
2225 set_acmtime(new_dir_i, VFS_MTIME);
2226 set_acmtime(old_d->d_inode, VFS_CTIME);
2230 kref_put(&new_d->d_kref);
2232 kref_put(&old_d->d_kref);
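/* Sketch of the lock-ordering rule suggested by the TODO in do_rename():
 * acquire both directory locks in one global order (here by struct inode
 * pointer) so two concurrent renames touching the same pair of directories
 * cannot deadlock.  This helper is illustrative only and is not called by
 * do_rename() as written. */
static void example_lock_two_dirs(struct inode *a, struct inode *b)
{
	if (a == b) {
		spin_lock(&a->i_lock);
		return;
	}
	if (a > b) {
		struct inode *tmp = a;

		a = b;
		b = tmp;
	}
	spin_lock(&a->i_lock);		/* lower address always locked first */
	spin_lock(&b->i_lock);
}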
2240 int do_truncate(struct inode *inode, off64_t len)
2249 printk("[kernel] truncate for > petabyte, probably a bug\n");
2250 /* continuing, not too concerned. could set EINVAL or EFBIG */
2252 spin_lock(&inode->i_lock);
2253 old_len = inode->i_size;
2254 if (old_len == len) {
2255 spin_unlock(&inode->i_lock);
2258 inode->i_size = len;
2259 /* truncate can't block, since we're holding the spinlock. but it can rely
2260 * on that lock being held */
2261 inode->i_op->truncate(inode);
2262 spin_unlock(&inode->i_lock);
2264 if (old_len > len) { /* shrinking: drop page-map pages past the new end */
2265 pm_remove_contig(inode->i_mapping, (len + PGSIZE - 1) >> PGSHIFT,
2266 ((old_len + PGSIZE - 1) >> PGSHIFT) - ((len + PGSIZE - 1) >> PGSHIFT));
2268 set_acmtime(inode, VFS_MTIME);
2272 struct file *alloc_file(void)
2274 struct file *file = kmem_cache_alloc(file_kcache, 0);
2279 /* one for the ref passed out*/
2280 kref_init(&file->f_kref, file_release, 1);
2284 /* Opens and returns the file specified by dentry */
2285 struct file *dentry_open(struct dentry *dentry, int flags)
2287 struct inode *inode;
2290 inode = dentry->d_inode;
2291 /* f_mode stores how the OS file is open, which can be more restrictive than
2292 * the inode's i_mode permissions */
2293 desired_mode = omode_to_rwx(flags & O_ACCMODE);
2294 if (check_perms(inode, desired_mode))
2296 file = alloc_file();
2299 file->f_mode = desired_mode;
2300 /* Add to the list of all files of this SB */
2301 TAILQ_INSERT_TAIL(&inode->i_sb->s_files, file, f_list);
2302 kref_get(&dentry->d_kref, 1);
2303 file->f_dentry = dentry;
2304 kref_get(&inode->i_sb->s_mount->mnt_kref, 1);
2305 file->f_vfsmnt = inode->i_sb->s_mount; /* saving a ref to the vmnt...*/
2306 file->f_op = inode->i_fop;
2307 /* Don't store creation flags */
2308 file->f_flags = flags & ~O_CREAT_FLAGS;
2310 file->f_uid = inode->i_uid;
2311 file->f_gid = inode->i_gid;
2313 // struct event_poll_tailq f_ep_links;
2314 spinlock_init(&file->f_ep_lock);
2315 file->f_privdata = 0; /* prob overridden by the fs */
2316 file->f_mapping = inode->i_mapping;
2317 file->f_op->open(inode, file);
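/* Hedged sketch of the omode_to_rwx() conversion used above: the assumption
 * (supported by pipe_open() comparing f_mode to S_IRUSR/S_IWUSR) is that the
 * O_ACCMODE bits map to owner-style rwx permission bits, which check_perms()
 * can then compare against the inode.  The real helper lives elsewhere; this
 * is an illustration, not its source. */
static int example_omode_to_rwx(int omode)
{
	switch (omode & O_ACCMODE) {
	case O_RDONLY:
		return S_IRUSR;
	case O_WRONLY:
		return S_IWUSR;
	case O_RDWR:
		return S_IRUSR | S_IWUSR;
	default:
		return 0;
	}
}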
2324 /* Closes a file, fsync, whatever else is necessary. Called when the kref hits
2325 * 0. Note that the file is not refcounted on the s_files list, nor is the
2326 * f_mapping refcounted (it is pinned by the i_mapping). */
2327 void file_release(struct kref *kref)
2329 struct file *file = container_of(kref, struct file, f_kref);
2331 struct super_block *sb = file->f_dentry->d_sb;
2332 spin_lock(&sb->s_lock);
2333 TAILQ_REMOVE(&sb->s_files, file, f_list);
2334 spin_unlock(&sb->s_lock);
2336 /* TODO: fsync (BLK). also, we may want to parallelize the blocking that
2337 * could happen in here (spawn kernel threads)... */
2338 file->f_op->release(file->f_dentry->d_inode, file);
2339 /* Clean up the other refs we hold */
2340 kref_put(&file->f_dentry->d_kref);
2341 kref_put(&file->f_vfsmnt->mnt_kref);
2342 kmem_cache_free(file_kcache, file);
2345 ssize_t kread_file(struct file *file, void *buf, size_t sz)
2347 /* TODO: (KFOP) (VFS kernel read/writes need to be from a ktask) */
2348 uintptr_t old_ret = switch_to_ktask();
2350 ssize_t cpy_amt = file->f_op->read(file, buf, sz, &dummy);
2352 switch_back_from_ktask(old_ret);
2356 /* Reads the contents of an entire file into a buffer, returning that buffer.
2357 * On error, prints something useful and returns 0 */
2358 void *kread_whole_file(struct file *file)
2364 size = file->f_dentry->d_inode->i_size;
2365 contents = kmalloc(size, MEM_WAIT);
2366 cpy_amt = kread_file(file, contents, size);
2368 printk("Error %d reading file %s\n", get_errno(), file_name(file));
2372 if (cpy_amt != size) {
2373 printk("Read %d, needed %d for file %s\n", cpy_amt, size,
2381 /* Process-related File management functions */
2383 /* Given any FD, get the appropriate object, 0 o/w. Set vfs if you're looking
2384 * for a file, o/w a chan. Set incref if you want a reference count (which is a
2385 * 9ns thing, you can't use the pointer if you didn't incref). */
2386 void *lookup_fd(struct fd_table *fdt, int fd, bool incref, bool vfs)
2391 spin_lock(&fdt->lock);
2393 spin_unlock(&fdt->lock);
2396 if (fd < fdt->max_fdset) {
2397 if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, fd)) {
2398 /* while max_files and max_fdset might not line up, we should never
2399 * have a valid fdset higher than files */
2400 assert(fd < fdt->max_files);
2402 retval = fdt->fd[fd].fd_file;
2404 retval = fdt->fd[fd].fd_chan;
2405 /* retval could be 0 if we asked for the wrong one (e.g. it's a
2406 * file, but we asked for a chan) */
2407 if (retval && incref) {
2409 kref_get(&((struct file*)retval)->f_kref, 1);
2411 chan_incref((struct chan*)retval);
2415 spin_unlock(&fdt->lock);
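/* Typical caller pattern for lookup_fd(), sketched with illustrative names:
 * ask for a VFS file with incref set, use it, then drop the reference we were
 * given.  A 0 return means a bad FD or an FD holding a 9ns chan instead. */
static ssize_t example_read_via_fd(struct fd_table *fdt, int fd, char *buf,
                                   size_t len, off64_t *off)
{
	struct file *file = lookup_fd(fdt, fd, TRUE, TRUE);
	ssize_t ret;

	if (!file)
		return -1;
	ret = file->f_op->read(file, buf, len, off);
	kref_put(&file->f_kref);	/* matches the incref from lookup_fd */
	return ret;
}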
2419 /* Given any FD, get the appropriate file, 0 o/w */
2420 struct file *get_file_from_fd(struct fd_table *open_files, int file_desc)
2422 return lookup_fd(open_files, file_desc, TRUE, TRUE);
2425 /* Grow the vfs fd set */
2426 static int grow_fd_set(struct fd_table *open_files)
2429 struct file_desc *nfd, *ofd;
2431 /* Only update open_fds once. If currently pointing to open_fds_init, then
2432 * update it to point to a newly allocated fd_set with space for
2433 * NR_FILE_DESC_MAX */
2434 if (open_files->open_fds == (struct fd_set*)&open_files->open_fds_init) {
2435 open_files->open_fds = kzmalloc(sizeof(struct fd_set), 0);
2436 memmove(open_files->open_fds, &open_files->open_fds_init,
2437 sizeof(struct small_fd_set));
2440 /* Grow the open_files->fd array in increments of NR_OPEN_FILES_DEFAULT */
2441 n = open_files->max_files + NR_OPEN_FILES_DEFAULT;
2442 if (n > NR_FILE_DESC_MAX)
2444 nfd = kzmalloc(n * sizeof(struct file_desc), 0);
2448 /* Move the old array on top of the new one */
2449 ofd = open_files->fd;
2450 memmove(nfd, ofd, open_files->max_files * sizeof(struct file_desc));
2452 /* Update the array and the maxes for both max_files and max_fdset */
2453 open_files->fd = nfd;
2454 open_files->max_files = n;
2455 open_files->max_fdset = n;
2457 /* Only free the old one if it wasn't pointing to open_files->fd_array */
2458 if (ofd != open_files->fd_array)
2463 /* Free the vfs fd set if necessary */
2464 static void free_fd_set(struct fd_table *open_files)
2467 if (open_files->open_fds != (struct fd_set*)&open_files->open_fds_init) {
2468 assert(open_files->fd != open_files->fd_array);
2469 /* need to reset the pointers to the internal addrs, in case we take a
2470 * look while debugging. 0 them out, since they have old data. our
2471 * current versions should all be closed. */
2472 memset(&open_files->open_fds_init, 0, sizeof(struct small_fd_set));
2473 memset(&open_files->fd_array, 0, sizeof(open_files->fd_array));
2475 free_me = open_files->open_fds;
2476 open_files->open_fds = (struct fd_set*)&open_files->open_fds_init;
2479 free_me = open_files->fd;
2480 open_files->fd = open_files->fd_array;
2485 /* If FD is in the group, remove it, decref it, and return TRUE. */
2486 bool close_fd(struct fd_table *fdt, int fd)
2488 struct file *file = 0;
2489 struct chan *chan = 0;
2490 struct fd_tap *tap = 0;
2494 spin_lock(&fdt->lock);
2495 if (fd < fdt->max_fdset) {
2496 if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, fd)) {
2497 /* while max_files and max_fdset might not line up, we should never
2498 * have a valid fdset higher than files */
2499 assert(fd < fdt->max_files);
2500 file = fdt->fd[fd].fd_file;
2501 chan = fdt->fd[fd].fd_chan;
2502 tap = fdt->fd[fd].fd_tap;
2503 fdt->fd[fd].fd_file = 0;
2504 fdt->fd[fd].fd_chan = 0;
2505 fdt->fd[fd].fd_tap = 0;
2506 CLR_BITMASK_BIT(fdt->open_fds->fds_bits, fd);
2507 if (fd < fdt->hint_min_fd)
2508 fdt->hint_min_fd = fd;
2512 spin_unlock(&fdt->lock);
2513 /* Need to decref/cclose outside of the lock; they could sleep */
2515 kref_put(&file->f_kref);
2519 kref_put(&tap->kref);
2523 void put_file_from_fd(struct fd_table *open_files, int file_desc)
2525 close_fd(open_files, file_desc);
2528 static int __get_fd(struct fd_table *open_files, int low_fd, bool must_use_low)
2532 bool update_hint = TRUE;
2533 if ((low_fd < 0) || (low_fd > NR_FILE_DESC_MAX))
2535 if (open_files->closed)
2536 return -EINVAL; /* won't matter, they are dying */
2537 if (must_use_low && GET_BITMASK_BIT(open_files->open_fds->fds_bits, low_fd))
2539 if (low_fd > open_files->hint_min_fd)
2540 update_hint = FALSE;
2542 low_fd = open_files->hint_min_fd;
2543 /* Loop until we have a valid slot (we grow the fd_array at the bottom of
2544 * the loop if we haven't found a slot in the current array). */
2545 while (slot == -1) {
2546 for (; low_fd < open_files->max_fdset; low_fd++) {
2547 if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, low_fd))
2550 SET_BITMASK_BIT(open_files->open_fds->fds_bits, slot);
2551 assert(slot < open_files->max_files &&
2552 open_files->fd[slot].fd_file == 0);
2553 /* We know slot >= hint, since we started with the hint */
2555 open_files->hint_min_fd = slot + 1;
2559 if ((error = grow_fd_set(open_files)))
2566 /* Insert a file or chan (obj, chosen by vfs) into the fd group with fd_flags.
2567 * If must_use_low, then we have to insert at FD = low_fd. o/w we start looking
2568 * for empty slots at low_fd. */
2569 int insert_obj_fdt(struct fd_table *fdt, void *obj, int low_fd, int fd_flags,
2570 bool must_use_low, bool vfs)
2573 spin_lock(&fdt->lock);
2574 slot = __get_fd(fdt, low_fd, must_use_low);
2576 spin_unlock(&fdt->lock);
2579 assert(slot < fdt->max_files &&
2580 fdt->fd[slot].fd_file == 0);
2582 kref_get(&((struct file*)obj)->f_kref, 1);
2583 fdt->fd[slot].fd_file = obj;
2584 fdt->fd[slot].fd_chan = 0;
2586 chan_incref((struct chan*)obj);
2587 fdt->fd[slot].fd_file = 0;
2588 fdt->fd[slot].fd_chan = obj;
2590 fdt->fd[slot].fd_flags = fd_flags;
2591 spin_unlock(&fdt->lock);
2595 /* Inserts the file in the fd_table, returning the corresponding new file
2596 * descriptor, or an error code. We start looking for open fds from low_fd.
2598 * Passing cloexec is a bit cheap, since we might want to expand it to support
2599 * more FD options in the future. */
2600 int insert_file(struct fd_table *open_files, struct file *file, int low_fd,
2601 bool must, bool cloexec)
2603 return insert_obj_fdt(open_files, file, low_fd, cloexec ? FD_CLOEXEC : 0,
2607 /* Closes all open files. Mostly just a "put" for all files. If cloexec, it
2608 * will only close the FDs with FD_CLOEXEC (opened with O_CLOEXEC or fcntl'd).
2610 * Notes on concurrency:
2611 * - Can't hold spinlocks while we call cclose, since it might sleep eventually.
2612 * - We're called from proc_destroy, so we could have concurrent openers trying
2613 * to add to the group (other syscalls), hence the "closed" flag.
2614 * - dot and slash chans are dealt with in proc_free. it's difficult to close
2615 * and zero those with concurrent syscalls, since those are a source of krefs.
2616 * - Once we lock and set closed, no further additions can happen. To simplify
2617 * our closes, we also allow multiple calls to this func (though that should
2618 * never happen with the current code). */
2619 void close_fdt(struct fd_table *fdt, bool cloexec)
2623 struct file_desc *to_close;
2626 to_close = kzmalloc(sizeof(struct file_desc) * fdt->max_files,
2628 spin_lock(&fdt->lock);
2630 spin_unlock(&fdt->lock);
2634 for (int i = 0; i < fdt->max_fdset; i++) {
2635 if (GET_BITMASK_BIT(fdt->open_fds->fds_bits, i)) {
2636 /* while max_files and max_fdset might not line up, we should never
2637 * have a valid fdset higher than files */
2638 assert(i < fdt->max_files);
2639 if (cloexec && !(fdt->fd[i].fd_flags & FD_CLOEXEC))
2641 file = fdt->fd[i].fd_file;
2642 chan = fdt->fd[i].fd_chan;
2643 to_close[idx].fd_tap = fdt->fd[i].fd_tap;
2644 fdt->fd[i].fd_tap = 0;
2646 fdt->fd[i].fd_file = 0;
2647 to_close[idx++].fd_file = file;
2649 fdt->fd[i].fd_chan = 0;
2650 to_close[idx++].fd_chan = chan;
2652 CLR_BITMASK_BIT(fdt->open_fds->fds_bits, i);
2655 /* it's just a hint, we can build back up from being 0 */
2656 fdt->hint_min_fd = 0;
2661 spin_unlock(&fdt->lock);
2662 /* We go through some hoops to close/decref outside the lock. Nice for not
2663 * holding the lock for a while; critical in case the decref/cclose sleeps
2664 * (can't sleep while holding a spinlock). */
2665 for (int i = 0; i < idx; i++) {
2666 if (to_close[i].fd_file)
2667 kref_put(&to_close[i].fd_file->f_kref);
2669 cclose(to_close[i].fd_chan);
2670 if (to_close[i].fd_tap)
2671 kref_put(&to_close[i].fd_tap->kref);
2676 /* Inserts all of the files from src into dst, used by sys_fork(). */
2677 void clone_fdt(struct fd_table *src, struct fd_table *dst)
2683 spin_lock(&src->lock);
2685 spin_unlock(&src->lock);
2688 spin_lock(&dst->lock);
2690 warn("Destination closed before it opened");
2691 spin_unlock(&dst->lock);
2692 spin_unlock(&src->lock);
2695 while (src->max_files > dst->max_files) {
2696 ret = grow_fd_set(dst);
2698 set_error(-ret, "Failed to grow for a clone_fdt");
2699 spin_unlock(&dst->lock);
2700 spin_unlock(&src->lock);
2704 for (int i = 0; i < src->max_fdset; i++) {
2705 if (GET_BITMASK_BIT(src->open_fds->fds_bits, i)) {
2706 /* while max_files and max_fdset might not line up, we should never
2707 * have a valid fdset higher than files */
2708 assert(i < src->max_files);
2709 file = src->fd[i].fd_file;
2710 chan = src->fd[i].fd_chan;
2711 assert(i < dst->max_files && dst->fd[i].fd_file == 0);
2712 SET_BITMASK_BIT(dst->open_fds->fds_bits, i);
2713 dst->fd[i].fd_file = file;
2714 dst->fd[i].fd_chan = chan;
2716 kref_get(&file->f_kref, 1);
2721 dst->hint_min_fd = src->hint_min_fd;
2722 spin_unlock(&dst->lock);
2723 spin_unlock(&src->lock);
2726 static void __chpwd(struct fs_struct *fs_env, struct dentry *new_pwd)
2728 struct dentry *old_pwd;
2729 kref_get(&new_pwd->d_kref, 1);
2730 /* writer lock, make sure we replace pwd with ours. could also CAS.
2731 * readers don't lock at all, so they need to either loop, or we need to
2732 * delay releasing old_pwd til an RCU grace period. */
2733 spin_lock(&fs_env->lock);
2734 old_pwd = fs_env->pwd;
2735 fs_env->pwd = new_pwd;
2736 spin_unlock(&fs_env->lock);
2737 kref_put(&old_pwd->d_kref);
2740 /* Change the working directory of the given fs env (one per process, at this
2741 * point). Returns 0 for success, sets errno and returns -1 otherwise. */
2742 int do_chdir(struct fs_struct *fs_env, char *path)
2744 struct nameidata nd_r = {0}, *nd = &nd_r;
2746 error = path_lookup(path, LOOKUP_DIRECTORY, nd);
2752 /* nd->dentry is the place we want our PWD to be */
2753 __chpwd(fs_env, nd->dentry);
2758 int do_fchdir(struct fs_struct *fs_env, struct file *file)
2760 if ((file->f_dentry->d_inode->i_mode & __S_IFMT) != __S_IFDIR) {
2764 __chpwd(fs_env, file->f_dentry);
2768 /* Returns a null-terminated string of up to length cwd_l containing the
2769 * absolute path of fs_env, (up to fs_env's root). Be sure to kfree the char*
2770 * "kfree_this" when you are done with it. We do this since it's easier to
2771 * build this string going backwards. Note cwd_l is not a strlen, it's an
2772 * absolute size of the buffer. */
2773 char *do_getcwd(struct fs_struct *fs_env, char **kfree_this, size_t cwd_l)
2775 struct dentry *dentry = fs_env->pwd;
2777 char *path_start, *kbuf;
2783 kbuf = kmalloc(cwd_l, 0);
2789 kbuf[cwd_l - 1] = '\0';
2790 kbuf[cwd_l - 2] = '/';
2791 /* for each dentry in the path, all the way back to the root of fs_env, we
2792 * grab the dentry name, push path_start back enough, and write in the name,
2793 * using /'s to terminate. We skip the root, since we don't want its
2794 * actual name, just "/", which is set before each loop. */
2795 path_start = kbuf + cwd_l - 2; /* the last byte written */
2796 while (dentry != fs_env->root) {
2797 link_len = dentry->d_name.len; /* this does not count the \0 */
2798 if (path_start - (link_len + 2) < kbuf) {
2803 path_start -= link_len;
2804 memmove(path_start, dentry->d_name.name, link_len);
2807 dentry = dentry->d_parent;
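/* Hedged usage sketch for do_getcwd(): the returned pointer points into the
 * middle of the kmalloc'd buffer, so the caller frees kfree_this and never
 * the returned pointer itself.  The 128-byte length here is arbitrary. */
static void example_print_cwd(struct fs_struct *fs_env)
{
	char *kfree_this = 0;
	char *cwd = do_getcwd(fs_env, &kfree_this, 128);

	if (cwd)
		printk("cwd: %s\n", cwd);
	if (kfree_this)
		kfree(kfree_this);
}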
2812 static void print_dir(struct dentry *dentry, char *buf, int depth)
2814 struct dentry *child_d;
2815 struct dirent next = {0};
2819 if (!S_ISDIR(dentry->d_inode->i_mode)) {
2820 warn("Thought this was only directories!!");
2823 /* Print this dentry */
2824 printk("%s%s/ nlink: %d\n", buf, dentry->d_name.name,
2825 dentry->d_inode->i_nlink);
2826 if (dentry->d_mount_point) {
2827 dentry = dentry->d_mounted_fs->mnt_root;
2831 /* Set buffer for our kids */
2833 dir = dentry_open(dentry, 0);
2835 panic("Filesystem seems inconsistent - unable to open a dir!");
2836 /* Process every child, recursing on directories */
2838 retval = dir->f_op->readdir(dir, &next);
2840 /* Skip .., ., and empty entries */
2841 if (!strcmp("..", next.d_name) || !strcmp(".", next.d_name) ||
2844 /* there is an entry, now get its dentry */
2845 child_d = do_lookup(dentry, next.d_name);
2847 panic("Inconsistent FS, dirent doesn't have a dentry!");
2848 /* Recurse for directories, or just print the name for others */
2849 switch (child_d->d_inode->i_mode & __S_IFMT) {
2851 print_dir(child_d, buf, depth + 1);
2854 printk("%s%s size(B): %d nlink: %d\n", buf, next.d_name,
2855 child_d->d_inode->i_size, child_d->d_inode->i_nlink);
2858 printk("%s%s -> %s\n", buf, next.d_name,
2859 child_d->d_inode->i_op->readlink(child_d));
2862 printk("%s%s (char device) nlink: %d\n", buf, next.d_name,
2863 child_d->d_inode->i_nlink);
2866 printk("%s%s (block device) nlink: %d\n", buf, next.d_name,
2867 child_d->d_inode->i_nlink);
2870 warn("Look around you! Unknown filetype!");
2872 kref_put(&child_d->d_kref);
2878 /* Reset buffer to the way it was */
2880 kref_put(&dir->f_kref);
2884 int ls_dash_r(char *path)
2886 struct nameidata nd_r = {0}, *nd = &nd_r;
2890 error = path_lookup(path, LOOKUP_ACCESS | LOOKUP_DIRECTORY, nd);
2895 print_dir(nd->dentry, buf, 0);
2900 /* Dummy ops, to catch weird operations we weren't expecting */
2901 int dummy_create(struct inode *dir, struct dentry *dentry, int mode,
2902 struct nameidata *nd)
2904 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2908 struct dentry *dummy_lookup(struct inode *dir, struct dentry *dentry,
2909 struct nameidata *nd)
2911 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2915 int dummy_link(struct dentry *old_dentry, struct inode *dir,
2916 struct dentry *new_dentry)
2918 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2922 int dummy_unlink(struct inode *dir, struct dentry *dentry)
2924 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2928 int dummy_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2930 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2934 int dummy_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2936 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2940 int dummy_rmdir(struct inode *dir, struct dentry *dentry)
2942 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2946 int dummy_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
2948 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2952 int dummy_rename(struct inode *old_dir, struct dentry *old_dentry,
2953 struct inode *new_dir, struct dentry *new_dentry)
2955 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2959 char *dummy_readlink(struct dentry *dentry)
2961 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2965 void dummy_truncate(struct inode *inode)
2967 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2970 int dummy_permission(struct inode *inode, int mode, struct nameidata *nd)
2972 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2976 int dummy_d_revalidate(struct dentry *dir, struct nameidata *nd)
2978 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2982 int dummy_d_hash(struct dentry *dentry, struct qstr *name)
2984 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2988 int dummy_d_compare(struct dentry *dir, struct qstr *name1, struct qstr *name2)
2990 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2994 int dummy_d_delete(struct dentry *dentry)
2996 printk("Dummy VFS function %s called!\n", __FUNCTION__);
3000 int dummy_d_release(struct dentry *dentry)
3002 printk("Dummy VFS function %s called!\n", __FUNCTION__);
3006 void dummy_d_iput(struct dentry *dentry, struct inode *inode)
3008 printk("Dummy VFS function %s called!\n", __FUNCTION__);
3011 struct inode_operations dummy_i_op = {
3026 struct dentry_operations dummy_d_op = {