1 /* Copyright (c) 2009, 2010 The Regents of the University of California
2 * Barret Rhoden <brho@cs.berkeley.edu>
3 * See LICENSE for details.
5 * Default implementations and global values for the VFS. */
7 #include <vfs.h> // keep this first
20 struct sb_tailq super_blocks = TAILQ_HEAD_INITIALIZER(super_blocks);
21 spinlock_t super_blocks_lock = SPINLOCK_INITIALIZER;
22 struct fs_type_tailq file_systems = TAILQ_HEAD_INITIALIZER(file_systems);
23 struct namespace default_ns;
25 struct kmem_cache *dentry_kcache; // not to be confused with the dcache
26 struct kmem_cache *inode_kcache;
27 struct kmem_cache *file_kcache;
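/* These slab caches back dentry, inode, and file allocations; all three are
 * created in vfs_init(). */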
29 /* Mounts fs from dev_name at mnt_pt in namespace ns. There could be no mnt_pt,
30 * such as with the root of (the default) namespace. Not sure how it would work
31 * with multiple namespaces on the same FS yet. Note if you mount the same FS
32 * multiple times, you only have one FS still (and one SB). If we ever support
34 struct vfsmount *__mount_fs(struct fs_type *fs, char *dev_name,
35 struct dentry *mnt_pt, int flags,
38 struct super_block *sb;
39 struct vfsmount *vmnt = kmalloc(sizeof(struct vfsmount), 0);
41 /* this first ref is stored in the NS tailq below */
42 kref_init(&vmnt->mnt_kref, fake_release, 1);
43 /* Build the vfsmount. If there is no mnt_pt, mnt is the root vfsmount (for
44 * now). Fields related to the actual FS, like the sb and the mnt_root, are
45 * set in the fs-specific get_sb() call. */
47 vmnt->mnt_parent = NULL;
48 vmnt->mnt_mountpoint = NULL;
49 } else { /* common case, but won't be tested til we try to mount another FS */
50 mnt_pt->d_mount_point = TRUE;
51 mnt_pt->d_mounted_fs = vmnt;
52 kref_get(&vmnt->mnt_kref, 1); /* held by mnt_pt */
53 vmnt->mnt_parent = mnt_pt->d_sb->s_mount;
54 vmnt->mnt_mountpoint = mnt_pt;
56 TAILQ_INIT(&vmnt->mnt_child_mounts);
57 vmnt->mnt_flags = flags;
58 vmnt->mnt_devname = dev_name;
59 vmnt->mnt_namespace = ns;
60 kref_get(&ns->kref, 1); /* held by vmnt */
62 /* Read in / create the SB */
63 sb = fs->get_sb(fs, flags, dev_name, vmnt);
65 panic("You're FS sucks");
67 /* TODO: consider moving this into get_sb or something, in case the SB
68 * already exists (mounting again) (if we support that) */
69 spin_lock(&super_blocks_lock);
70 TAILQ_INSERT_TAIL(&super_blocks, sb, s_list); /* storing a ref here... */
71 spin_unlock(&super_blocks_lock);
73 /* Update holding NS */
75 TAILQ_INSERT_TAIL(&ns->vfsmounts, vmnt, mnt_list);
76 spin_unlock(&ns->lock);
77 /* note to self: so, right after this point, the NS points to the root FS
78 * mount (we return the mnt, which gets assigned), the root mnt has a dentry
79 * for /, backed by an inode, with a SB prepped and in memory. */
87 dentry_kcache = kmem_cache_create("dentry", sizeof(struct dentry),
88 __alignof__(struct dentry), 0, 0, 0);
89 inode_kcache = kmem_cache_create("inode", sizeof(struct inode),
90 __alignof__(struct inode), 0, 0, 0);
91 file_kcache = kmem_cache_create("file", sizeof(struct file),
92 __alignof__(struct file), 0, 0, 0);
93 /* default NS never dies, +1 to exist */
94 kref_init(&default_ns.kref, fake_release, 1);
95 spinlock_init(&default_ns.lock);
96 default_ns.root = NULL;
97 TAILQ_INIT(&default_ns.vfsmounts);
99 /* build list of all FS's in the system. put yours here. if this is ever
100 * done on the fly, we'll need to lock. */
101 TAILQ_INSERT_TAIL(&file_systems, &kfs_fs_type, list);
103 TAILQ_INSERT_TAIL(&file_systems, &ext2_fs_type, list);
105 TAILQ_FOREACH(fs, &file_systems, list)
106 printk("Supports the %s Filesystem\n", fs->name);
108 /* mounting KFS at the root (/), pending root= parameters */
109 // TODO: linux creates a temp root_fs, then mounts the real root onto that
110 default_ns.root = __mount_fs(&kfs_fs_type, "RAM", NULL, 0, &default_ns);
112 printk("vfs_init() completed\n");
115 /* Builds / populates the qstr of a dentry based on its d_iname. If there is an
116 * l_name (a long name), it will use that instead of the inline name. This will
117 * probably change a bit. */
118 void qstr_builder(struct dentry *dentry, char *l_name)
120 dentry->d_name.name = l_name ? l_name : dentry->d_iname;
121 // TODO: pending what we actually do in d_hash
122 //dentry->d_name.hash = dentry->d_op->d_hash(dentry, &dentry->d_name);
123 dentry->d_name.hash = 0xcafebabe;
124 dentry->d_name.len = strnlen(dentry->d_name.name, MAX_FILENAME_SZ);
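/* The 0xcafebabe above is just a placeholder. A rough sketch (example helper
 * only, not wired into any d_op) of the kind of byte-wise hash a real d_hash
 * might compute over the name/len pair built here: */
static size_t example_dname_hash(struct dentry *dentry)
{
	size_t hash = 5381;	/* djb2-style: hash = hash * 33 + c */

	for (size_t i = 0; i < dentry->d_name.len; i++)
		hash = hash * 33 + (unsigned char)dentry->d_name.name[i];
	return hash;
}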
127 /* Useful little helper - return the string ptr for a given file */
128 char *file_name(struct file *file)
130 return file->f_dentry->d_name.name;
133 /* Some issues with this, coupled closely to fs_lookup.
135 * Note the use of __dentry_free, instead of kref_put. In those cases, we don't
136 * want to treat it like a kref and we have the only reference to it, so it is
137 * okay to do this. It makes dentry_release() easier too. */
138 static struct dentry *do_lookup(struct dentry *parent, char *name)
140 struct dentry *result, *query;
141 query = get_dentry(parent->d_sb, parent, name);
143 warn("OOM in do_lookup(), probably wasn't expected\n");
146 result = dcache_get(parent->d_sb, query);
148 __dentry_free(query);
151 /* No result, check for negative */
152 if (query->d_flags & DENTRY_NEGATIVE) {
153 __dentry_free(query);
156 /* not in the dcache at all, need to consult the FS */
157 result = parent->d_inode->i_op->lookup(parent->d_inode, query, 0);
159 /* Note the USED flag will get turned off when this gets added to the
160 * LRU in dentry_release(). There's a slight race here that we'll panic
161 * on, but I want to catch it (in dcache_put()) for now. */
162 query->d_flags |= DENTRY_NEGATIVE;
163 dcache_put(parent->d_sb, query);
164 kref_put(&query->d_kref);
167 dcache_put(parent->d_sb, result);
168 /* This is because KFS doesn't return the same dentry, but ext2 does. This
169 * is ugly and needs to be fixed. (TODO) */
171 __dentry_free(query);
173 /* TODO: if the following are done by us, how do we know the i_ino?
174 * also need to handle inodes that are already read in! For now, we're
175 * going to have the FS handle it in its lookup() method:
177 * - read in the inode
178 * - put in the inode cache */
182 /* Update ND such that it represents having followed dentry. IAW the nd
183 * refcnting rules, we need to decref any references that were in there before
184 * they get clobbered. */
185 static int next_link(struct dentry *dentry, struct nameidata *nd)
187 assert(nd->dentry && nd->mnt);
188 /* update the dentry */
189 kref_get(&dentry->d_kref, 1);
190 kref_put(&nd->dentry->d_kref);
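/* Note the ordering: the new ref is taken before the old one is dropped, so
 * this stays safe even if dentry is the same object nd already tracks. */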
192 /* update the mount, if we need to */
193 if (dentry->d_sb->s_mount != nd->mnt) {
194 kref_get(&dentry->d_sb->s_mount->mnt_kref, 1);
195 kref_put(&nd->mnt->mnt_kref);
196 nd->mnt = dentry->d_sb->s_mount;
201 /* Walk up one directory, being careful of mountpoints, namespaces, and the top
203 static int climb_up(struct nameidata *nd)
205 printd("CLIMB_UP, from %s\n", nd->dentry->d_name.name);
206 /* Top of the world, just return. Should also check for being at the top of
207 * the current process's namespace (TODO) */
208 if (!nd->dentry->d_parent || (nd->dentry->d_parent == nd->dentry))
210 /* Check if we are at the top of a mount, if so, we need to follow
211 * backwards, and then climb_up from that one. We might need to climb
212 * multiple times if we mount multiple FSs at the same spot (highly
213 * unlikely). This is completely untested. Might recurse instead. */
214 while (nd->mnt->mnt_root == nd->dentry) {
215 if (!nd->mnt->mnt_parent) {
216 warn("Might have expected a parent vfsmount (dentry had a parent)");
219 next_link(nd->mnt->mnt_mountpoint, nd);
221 /* Backwards walk (no mounts or any other issues now). */
222 next_link(nd->dentry->d_parent, nd);
223 printd("CLIMB_UP, to %s\n", nd->dentry->d_name.name);
227 /* nd->dentry might be on a mount point, so we need to move on to the child
229 static int follow_mount(struct nameidata *nd)
231 if (!nd->dentry->d_mount_point)
233 next_link(nd->dentry->d_mounted_fs->mnt_root, nd);
237 static int link_path_walk(char *path, struct nameidata *nd);
239 /* When nd->dentry is for a symlink, this will recurse and follow that symlink,
240 * so that nd contains the results of following the symlink (dentry and mnt).
241 * Returns 0 when it isn't a symlink, 1 on following a link, and < 0 on error. */
242 static int follow_symlink(struct nameidata *nd)
246 if (!S_ISLNK(nd->dentry->d_inode->i_mode))
248 if (nd->depth > MAX_SYMLINK_DEPTH)
250 printd("Following symlink for dentry %p %s\n", nd->dentry,
251 nd->dentry->d_name.name);
253 symname = nd->dentry->d_inode->i_op->readlink(nd->dentry);
254 /* We need to pin nd->dentry (the dentry of the symlink), since we need
255 * its symname's storage to stay in memory throughout the upcoming
256 * link_path_walk(). The last_sym gets decreffed when we path_release() or
257 * follow another symlink. */
259 kref_put(&nd->last_sym->d_kref);
260 kref_get(&nd->dentry->d_kref, 1);
261 nd->last_sym = nd->dentry;
262 /* If this is an absolute path in the symlink, we need to free the old path and
263 * start over; otherwise, we continue from the PARENT of nd (the symlink). */
264 if (symname[0] == '/') {
267 nd->dentry = default_ns.root->mnt_root;
269 nd->dentry = current->fs_env.root;
270 nd->mnt = nd->dentry->d_sb->s_mount;
271 kref_get(&nd->mnt->mnt_kref, 1);
272 kref_get(&nd->dentry->d_kref, 1);
276 /* either way, keep on walking in the free world! */
277 retval = link_path_walk(symname, nd);
278 return (retval == 0 ? 1 : retval);
281 /* Little helper, to make it easier to break out of the nested loops. Will also
282 * '\0' out the first slash if it's slashes all the way down. Or turtles. */
283 static bool packed_trailing_slashes(char *first_slash)
285 for (char *i = first_slash; *i == '/'; i++) {
286 if (*(i + 1) == '\0') {
294 /* Simple helper to set nd's last name to the given name. Also be careful
295 * with the storage of name. Don't use an nd's name past the lifetime of the
296 * string used in path_lookup()/link_path_walk()/whatever. Consider replacing
297 * parts of this with a qstr builder. Note this uses the dentry's d_op, which
298 * might not be the dentry we care about. */
299 static void stash_nd_name(struct nameidata *nd, char *name)
301 nd->last.name = name;
302 nd->last.len = strlen(name);
303 nd->last.hash = nd->dentry->d_op->d_hash(nd->dentry, &nd->last);
306 /* Resolves the links in a basic path walk. 0 for success, -EWHATEVER
307 * otherwise. The final lookup is returned via nd. */
308 static int link_path_walk(char *path, struct nameidata *nd)
310 struct dentry *link_dentry;
311 struct inode *link_inode, *nd_inode;
316 /* Prevent crazy recursion */
317 if (nd->depth > MAX_SYMLINK_DEPTH)
319 /* skip all leading /'s */
322 /* if there's nothing left (null terminated), we're done. This should only
323 * happen for "/", which if we wanted a PARENT, should fail (there is no
326 if (nd->flags & LOOKUP_PARENT) {
330 /* o/w, we're good */
333 /* iterate through each intermediate link of the path. in general, nd
334 * tracks where we are in the path, as far as dentries go. once we have the
335 * next dentry, we try to update nd based on that dentry. link is the part
336 * of the path string that we are looking up */
338 nd_inode = nd->dentry->d_inode;
339 if ((error = check_perms(nd_inode, nd->intent)))
341 /* find the next link, break out if it is the end */
342 next_slash = strchr(link, '/');
346 if (packed_trailing_slashes(next_slash)) {
347 nd->flags |= LOOKUP_DIRECTORY;
351 /* skip over any interim ./ */
352 if (!strncmp("./", link, 2))
354 /* Check for "../", walk up */
355 if (!strncmp("../", link, 3)) {
360 link_dentry = do_lookup(nd->dentry, link);
364 /* make link_dentry the current step/answer */
365 next_link(link_dentry, nd);
366 kref_put(&link_dentry->d_kref); /* do_lookup gave us a refcnt dentry */
367 /* we could be on a mountpoint or a symlink - need to follow them */
369 if ((error = follow_symlink(nd)) < 0)
371 /* Turn off a possible DIRECTORY lookup, which could have been set
372 * during the follow_symlink (a symlink could have had a directory at
373 * the end), though it was in the middle of the real path. */
374 nd->flags &= ~LOOKUP_DIRECTORY;
375 if (!S_ISDIR(nd->dentry->d_inode->i_mode))
378 /* move through the path string to the next entry */
379 link = next_slash + 1;
380 /* advance past any other interim slashes. we know we won't hit the end
381 * due to the for loop check above */
385 /* Now, we're on the last link of the path. We need to deal with '.' and
386 * '..'. This might be weird with PARENT lookups - not sure what semantics
387 * we want exactly. This will give the parent of whatever the PATH was
388 * supposed to look like. Note that ND currently points to the parent of
389 * the last item (link). */
390 if (!strcmp(".", link)) {
391 if (nd->flags & LOOKUP_PARENT) {
392 assert(nd->dentry->d_name.name);
393 stash_nd_name(nd, nd->dentry->d_name.name);
398 if (!strcmp("..", link)) {
400 if (nd->flags & LOOKUP_PARENT) {
401 assert(nd->dentry->d_name.name);
402 stash_nd_name(nd, nd->dentry->d_name.name);
407 /* need to attempt to look it up, in case it's a symlink */
408 link_dentry = do_lookup(nd->dentry, link);
410 /* if there's no dentry, we are okay if we are looking for the parent */
411 if (nd->flags & LOOKUP_PARENT) {
412 assert(strcmp(link, ""));
413 stash_nd_name(nd, link);
419 next_link(link_dentry, nd);
420 kref_put(&link_dentry->d_kref); /* do_lookup gave us a refcnt'd dentry */
421 /* at this point, nd is on the final link, but it might be a symlink */
422 if (nd->flags & LOOKUP_FOLLOW) {
423 error = follow_symlink(nd);
426 /* if we actually followed a symlink, then nd is set and we're done */
430 /* One way or another, nd is on the last element of the path, symlinks and
431 * all. Now we need to climb up to set nd back on the parent, if that's
433 if (nd->flags & LOOKUP_PARENT) {
434 assert(nd->dentry->d_name.name);
435 stash_nd_name(nd, link_dentry->d_name.name);
439 /* now, we have the dentry set, and don't want the parent, but might be on a
440 * mountpoint still. FYI: this hasn't been thought through completely. */
442 /* If we wanted a directory, but didn't get one, error out */
443 if ((nd->flags & LOOKUP_DIRECTORY) && !S_ISDIR(nd->dentry->d_inode->i_mode))
448 /* Given path, return the inode for the final dentry. The ND should be
449 * initialized for the first call - specifically, we need the intent.
450 * LOOKUP_PARENT and friends go in the flags var, which is not the intent.
452 * If path_lookup wants a PARENT, but hits the top of the FS (root or
453 * otherwise), we want it to error out. It's still unclear how we want to
454 * handle processes with roots that aren't root, but at the very least, we don't
455 * want to think we have the parent of /, but have / itself. Due to the way
456 * link_path_walk works, if that happened, we probably don't have a
457 * nd->last.name. This needs more thought (TODO).
459 * Need to be careful too. While the path has been copied-in to the kernel,
460 * it's still user input. */
461 int path_lookup(char *path, int flags, struct nameidata *nd)
464 printd("Path lookup for %s\n", path);
465 /* we allow absolute lookups with no process context */
466 if (path[0] == '/') { /* absolute lookup */
468 nd->dentry = default_ns.root->mnt_root;
470 nd->dentry = current->fs_env.root;
471 } else { /* relative lookup */
473 /* Don't need to lock on the fs_env since we're reading one item */
474 nd->dentry = current->fs_env.pwd;
476 nd->mnt = nd->dentry->d_sb->s_mount;
477 /* Whenever references get put in the nd, incref them. Whenever they are
478 * removed, decref them. */
479 kref_get(&nd->mnt->mnt_kref, 1);
480 kref_get(&nd->dentry->d_kref, 1);
482 nd->depth = 0; /* used in symlink following */
483 retval = link_path_walk(path, nd);
484 /* make sure our PARENT lookup worked */
485 if (!retval && (flags & LOOKUP_PARENT))
486 assert(nd->last.name);
490 /* Call this after any use of path_lookup when you are done with its results,
491 * regardless of whether it succeeded or not. It will free any references */
492 void path_release(struct nameidata *nd)
494 kref_put(&nd->dentry->d_kref);
495 kref_put(&nd->mnt->mnt_kref);
496 /* Free the last symlink dentry used, if there was one */
498 kref_put(&nd->last_sym->d_kref);
499 nd->last_sym = 0; /* catch reuse bugs */
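/* Typical usage sketch (example helper only, not part of the VFS API): pair
 * every path_lookup() with a path_release(), success or not, and only touch
 * nd's results in between. */
static bool example_path_is_dir(char *path)
{
	struct nameidata nd_r = {0}, *nd = &nd_r;
	bool retval = FALSE;

	nd->intent = LOOKUP_ACCESS;	/* a metadata peek, like do_access() */
	if (!path_lookup(path, LOOKUP_FOLLOW, nd))
		retval = S_ISDIR(nd->dentry->d_inode->i_mode);
	path_release(nd);	/* drops the nd's refs, even on failure */
	return retval;
}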
503 /* External version of mount, only call this after having a / mount */
504 int mount_fs(struct fs_type *fs, char *dev_name, char *path, int flags)
506 struct nameidata nd_r = {0}, *nd = &nd_r;
508 retval = path_lookup(path, LOOKUP_DIRECTORY, nd);
511 /* taking the namespace of the vfsmount of path */
512 if (!__mount_fs(fs, dev_name, nd->dentry, flags, nd->mnt->mnt_namespace))
519 /* Superblock functions */
521 /* Dentry "hash" function for the hash table to use. Since we already have the
522 * hash in the qstr, we don't need to rehash. Also, note we'll be using the
523 * dentry in question as both the key and the value. */
524 static size_t __dcache_hash(void *k)
526 return (size_t)((struct dentry*)k)->d_name.hash;
529 /* Dentry cache hashtable equality function. This means we need to pass in some
530 * minimal dentry when doing a lookup. */
531 static ssize_t __dcache_eq(void *k1, void *k2)
533 if (((struct dentry*)k1)->d_parent != ((struct dentry*)k2)->d_parent)
535 /* TODO: use the FS-specific string comparison */
536 return !strcmp(((struct dentry*)k1)->d_name.name,
537 ((struct dentry*)k2)->d_name.name);
540 /* Helper to alloc and initialize a generic superblock. This handles all the
541 * VFS related things, like lists. Each FS will need to handle its own things
542 * in its *_get_sb(), usually involving reading off the disc. */
543 struct super_block *get_sb(void)
545 struct super_block *sb = kmalloc(sizeof(struct super_block), 0);
547 spinlock_init(&sb->s_lock);
548 kref_init(&sb->s_kref, fake_release, 1); /* for the ref passed out */
549 TAILQ_INIT(&sb->s_inodes);
550 TAILQ_INIT(&sb->s_dirty_i);
551 TAILQ_INIT(&sb->s_io_wb);
552 TAILQ_INIT(&sb->s_lru_d);
553 TAILQ_INIT(&sb->s_files);
554 sb->s_dcache = create_hashtable(100, __dcache_hash, __dcache_eq);
555 sb->s_icache = create_hashtable(100, __generic_hash, __generic_eq);
556 spinlock_init(&sb->s_lru_lock);
557 spinlock_init(&sb->s_dcache_lock);
558 spinlock_init(&sb->s_icache_lock);
559 sb->s_fs_info = 0; // can override somewhere else
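/* Note: the FS-specific *_get_sb() presumably fills in the rest (s_op,
 * s_blocksize, s_bdev, etc.) before init_sb() runs - init_sb() and
 * get_inode() below rely on those fields. */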
563 /* Final stages of initializing a super block, including creating and linking
564 * the root dentry, root inode, vmnt, and sb. The d_op and root_ino are
565 * FS-specific, but otherwise it's FS-independent, tricky, and not worth having
566 * around multiple times.
568 * Not the world's best interface, so it's subject to change, esp since we're
569 * passing (now 3) FS-specific things. */
570 void init_sb(struct super_block *sb, struct vfsmount *vmnt,
571 struct dentry_operations *d_op, unsigned long root_ino,
574 /* Build and init the first dentry / inode. The dentry ref is stored later
575 * by vfsmount's mnt_root. The parent is dealt with later. */
576 struct dentry *d_root = get_dentry(sb, 0, "/"); /* probably right */
579 panic("OOM! init_sb() can't fail yet!");
580 /* A lot of what's here on down is normally done in lookup() or create(), since
581 * get_dentry isn't a fully usable dentry. The two FS-specific settings are
582 * normally inherited from a parent within the same FS in get_dentry, but we
585 d_root->d_fs_info = d_fs_info;
586 struct inode *inode = get_inode(d_root);
588 panic("This FS sucks!");
589 inode->i_ino = root_ino;
590 /* TODO: add the inode to the appropriate list (off i_list) */
591 /* TODO: do we need to read in the inode? can we do this on demand? */
592 /* if this FS is already mounted, we'll need to do something different. */
593 sb->s_op->read_inode(inode);
594 icache_put(sb, inode);
595 /* Link the dentry and SB to the VFS mount */
596 vmnt->mnt_root = d_root; /* ref comes from get_dentry */
598 /* If there is no mount point, there is no parent. This is true only for
600 if (vmnt->mnt_mountpoint) {
601 kref_get(&vmnt->mnt_mountpoint->d_kref, 1); /* held by d_root */
602 d_root->d_parent = vmnt->mnt_mountpoint; /* dentry of the root */
604 d_root->d_parent = d_root; /* set root as its own parent */
606 /* insert the dentry into the dentry cache. when's the earliest we can?
607 * when's the earliest we should? what about concurrent accesses to the
608 * same dentry? should be locking the dentry... */
609 dcache_put(sb, d_root);
610 kref_put(&inode->i_kref); /* give up the ref from get_inode() */
613 /* Dentry Functions */
615 /* Helper to alloc and initialize a generic dentry. The following needs to be
616 * set still: d_op (if no parent), d_fs_info (opt), d_inode, connect the inode
617 * to the dentry (and up the d_kref again), maybe dcache_put(). The inode
618 * stitching is done in get_inode() or lookup (depending on the FS).
619 * The setting of the d_op might be problematic when dealing with mounts. Just
622 * If the name is longer than the inline name, it will kmalloc a buffer, so
623 * don't worry about the storage for *name after calling this. */
624 struct dentry *get_dentry(struct super_block *sb, struct dentry *parent,
628 size_t name_len = strnlen(name, MAX_FILENAME_SZ); /* not including \0! */
629 struct dentry *dentry = kmem_cache_alloc(dentry_kcache, 0);
636 //memset(dentry, 0, sizeof(struct dentry));
637 kref_init(&dentry->d_kref, dentry_release, 1); /* this ref is returned */
638 spinlock_init(&dentry->d_lock);
639 TAILQ_INIT(&dentry->d_subdirs);
641 kref_get(&sb->s_kref, 1);
642 dentry->d_sb = sb; /* storing a ref here... */
643 dentry->d_mount_point = FALSE;
644 dentry->d_mounted_fs = 0;
645 if (parent) { /* no parent for rootfs mount */
646 kref_get(&parent->d_kref, 1);
647 dentry->d_op = parent->d_op; /* d_op set in init_sb for parentless */
649 dentry->d_parent = parent;
650 dentry->d_flags = DENTRY_USED;
651 dentry->d_fs_info = 0;
652 if (name_len < DNAME_INLINE_LEN) {
653 strncpy(dentry->d_iname, name, name_len);
654 dentry->d_iname[name_len] = '\0';
655 qstr_builder(dentry, 0);
657 l_name = kmalloc(name_len + 1, 0);
659 strncpy(l_name, name, name_len);
660 l_name[name_len] = '\0';
661 qstr_builder(dentry, l_name);
663 /* Catch bugs by aggressively zeroing this (o/w we use old stuff) */
668 /* Called when the dentry is unreferenced (after kref == 0). This works closely
669 * with the resurrection in dcache_get().
671 * The dentry is still in the dcache, but needs to be un-USED and added to the
672 * LRU dentry list. Even dentries that were used in a failed lookup need to be
673 * cached - they ought to be the negative dentries. Note that all dentries have
674 * parents, even negative ones (the parent is needed to find it in the dcache). */
675 void dentry_release(struct kref *kref)
677 struct dentry *dentry = container_of(kref, struct dentry, d_kref);
679 printd("'Releasing' dentry %p: %s\n", dentry, dentry->d_name.name);
680 /* DYING dentries (recently unlinked / rmdir'd) just get freed */
681 if (dentry->d_flags & DENTRY_DYING) {
682 __dentry_free(dentry);
685 /* This lock ensures the USED state and the TAILQ membership is in sync.
686 * Also used to check the refcnt, though that might not be necessary. */
687 spin_lock(&dentry->d_lock);
688 /* While locked, we need to double check the kref, in case someone already
689 * reup'd it. Re-up? you're crazy! Reee-up, you're outta yo mind! */
690 if (!kref_refcnt(&dentry->d_kref)) {
691 /* Note this is where negative dentries get set UNUSED */
692 if (dentry->d_flags & DENTRY_USED) {
693 dentry->d_flags &= ~DENTRY_USED;
694 spin_lock(&dentry->d_sb->s_lru_lock);
695 TAILQ_INSERT_TAIL(&dentry->d_sb->s_lru_d, dentry, d_lru);
696 spin_unlock(&dentry->d_sb->s_lru_lock);
698 /* and make sure it wasn't USED, then UNUSED again */
699 /* TODO: think about issues with this */
700 warn("This should be rare. Tell brho this happened.");
703 spin_unlock(&dentry->d_lock);
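/* Note the dentry itself stays allocated and (unless DYING) in the dcache;
 * actual freeing only happens via __dentry_free(), either above for DYING
 * dentries or when dcache_prune() evicts unused ones from the LRU list. */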
706 /* Called when we really dealloc and get rid of a dentry (like when it is
707 * removed from the dcache, either for memory or correctness reasons)
709 * This has to handle two types of dentries: full ones (ones that had been used)
710 * and ones that had been just for lookups - hence the check for d_inode.
712 * Note that dentries pin and kref their inodes. When all the dentries are
713 * gone, we want the inode to be released via kref. The inode has internal /
714 * weak references to the dentry, which are not refcounted. */
715 void __dentry_free(struct dentry *dentry)
718 printk("Freeing dentry %p: %s\n", dentry, dentry->d_name.name);
719 assert(dentry->d_op); /* catch bugs. a while back, some lacked d_op */
720 dentry->d_op->d_release(dentry);
721 /* Must match get_dentry(): names with len >= DNAME_INLINE_LEN were kmalloc'd */
722 if (dentry->d_name.len >= DNAME_INLINE_LEN)
723 kfree((void*)dentry->d_name.name);
724 kref_put(&dentry->d_sb->s_kref);
725 if (dentry->d_parent)
726 kref_put(&dentry->d_parent->d_kref);
727 if (dentry->d_mounted_fs)
728 kref_put(&dentry->d_mounted_fs->mnt_kref);
729 if (dentry->d_inode) {
730 TAILQ_REMOVE(&dentry->d_inode->i_dentry, dentry, d_alias);
731 kref_put(&dentry->d_inode->i_kref); /* dentries kref inodes */
733 kmem_cache_free(dentry_kcache, dentry);
736 /* Looks up the dentry for the given path, returning a refcnt'd dentry (or 0).
737 * Permissions are applied for the current user, which is quite a broken system
738 * at the moment. Flags are lookup flags. */
739 struct dentry *lookup_dentry(char *path, int flags)
741 struct dentry *dentry;
742 struct nameidata nd_r = {0}, *nd = &nd_r;
745 error = path_lookup(path, flags, nd);
752 kref_get(&dentry->d_kref, 1);
757 /* Get a dentry from the dcache. At a minimum, we need the name hash and parent
758 * in what_i_want, though most uses will probably be from a get_dentry() call.
759 * We pass in the SB in the off chance that we don't want to use a get'd dentry.
761 * The unusual variable name (instead of just "key" or something) is named after
762 * ex-SPC Castro's porn folder. Caller deals with the memory for what_i_want.
764 * If the dentry is negative, we don't return the actual result - instead, we
765 * set the negative flag in 'what i want'. The reason is we don't want to
766 * kref_get() and then immediately put (causing dentry_release()). This also
767 * means that dentry_release() should never get someone who wasn't USED (barring
768 * the race, which it handles). And we don't need to ever have a dentry set as
769 * USED and NEGATIVE (which is always wrong, but would be needed for a cleaner
772 * This is where we do the "kref resurrection" - we are returning a kref'd
773 * object, even if it wasn't kref'd before. This means the dcache does NOT hold
774 * krefs (it is a weak/internal ref), but it is a source of kref generation. We
775 * sync up with the possible freeing of the dentry by locking the table. See
776 * Doc/kref for more info. */
777 struct dentry *dcache_get(struct super_block *sb, struct dentry *what_i_want)
779 struct dentry *found;
780 /* This lock protects the hash, as well as ensures the returned object
781 * doesn't get deleted/freed out from under us */
782 spin_lock(&sb->s_dcache_lock);
783 found = hashtable_search(sb->s_dcache, what_i_want);
785 if (found->d_flags & DENTRY_NEGATIVE) {
786 what_i_want->d_flags |= DENTRY_NEGATIVE;
787 spin_unlock(&sb->s_dcache_lock);
790 spin_lock(&found->d_lock);
791 __kref_get(&found->d_kref, 1); /* prob could be done outside the lock*/
792 /* If we're here (after kreffing) and it is not USED, we are the one who
793 * should resurrect */
794 if (!(found->d_flags & DENTRY_USED)) {
795 found->d_flags |= DENTRY_USED;
796 spin_lock(&sb->s_lru_lock);
797 TAILQ_REMOVE(&sb->s_lru_d, found, d_lru);
798 spin_unlock(&sb->s_lru_lock);
800 spin_unlock(&found->d_lock);
802 spin_unlock(&sb->s_dcache_lock);
806 /* Adds a dentry to the dcache. Note the *dentry is both the key and the value.
807 * If the value was already in there (which can happen iff it was negative), for
808 * now we'll remove it and put the new one in there. */
809 void dcache_put(struct super_block *sb, struct dentry *key_val)
813 spin_lock(&sb->s_dcache_lock);
814 old = hashtable_remove(sb->s_dcache, key_val);
816 assert(old->d_flags & DENTRY_NEGATIVE);
817 /* This is possible, but rare for now (about to be put on the LRU) */
818 assert(!(old->d_flags & DENTRY_USED));
819 assert(!kref_refcnt(&old->d_kref));
820 spin_lock(&sb->s_lru_lock);
821 TAILQ_REMOVE(&sb->s_lru_d, old, d_lru);
822 spin_unlock(&sb->s_lru_lock);
825 /* this returns 0 on failure (TODO: Fix this ghetto shit) */
826 retval = hashtable_insert(sb->s_dcache, key_val, key_val);
828 spin_unlock(&sb->s_dcache_lock);
831 /* Will remove and return the dentry. Caller deallocs the key, but the retval
832 * won't have a reference. Returns 0 if it wasn't found. Callers can't
833 * assume much - they should not use the reference they *get back* (if they
834 * already had one for key, they can use that). There may be other users out
836 struct dentry *dcache_remove(struct super_block *sb, struct dentry *key)
838 struct dentry *retval;
839 spin_lock(&sb->s_dcache_lock);
840 retval = hashtable_remove(sb->s_dcache, key);
841 spin_unlock(&sb->s_dcache_lock);
845 /* This will clean out the LRU list, which are the unused dentries of the dentry
846 * cache. This will optionally only free the negative ones. Note that we grab
847 * the hash lock for the time we traverse the LRU list - this prevents someone
848 * from getting a kref from the dcache, which could cause us trouble (we rip
849 * someone off the list, who isn't unused, and they try to rip them off the
851 void dcache_prune(struct super_block *sb, bool negative_only)
853 struct dentry *d_i, *temp;
854 struct dentry_tailq victims = TAILQ_HEAD_INITIALIZER(victims);
856 spin_lock(&sb->s_dcache_lock);
857 spin_lock(&sb->s_lru_lock);
858 TAILQ_FOREACH_SAFE(d_i, &sb->s_lru_d, d_lru, temp) {
859 if (!(d_i->d_flags & DENTRY_USED)) {
860 if (negative_only && !(d_i->d_flags & DENTRY_NEGATIVE))
862 /* another place where we'd be better off with tools, not sol'ns */
863 hashtable_remove(sb->s_dcache, d_i);
864 TAILQ_REMOVE(&sb->s_lru_d, d_i, d_lru);
865 TAILQ_INSERT_HEAD(&victims, d_i, d_lru);
868 spin_unlock(&sb->s_lru_lock);
869 spin_unlock(&sb->s_dcache_lock);
870 /* Now do the actual freeing, outside of the hash/LRU list locks. This is
871 * necessary since __dentry_free() will decref its parent, which may get
872 * released and try to add itself to the LRU. */
873 TAILQ_FOREACH_SAFE(d_i, &victims, d_lru, temp) {
874 TAILQ_REMOVE(&victims, d_i, d_lru);
875 assert(!kref_refcnt(&d_i->d_kref));
878 /* It is possible at this point that there are new items on the LRU. We
879 * could loop back until that list is empty, if we care about this. */
882 /* Inode Functions */
884 /* Creates and initializes a new inode. Generic fields are filled in.
885 * FS-specific fields are filled in by the callout. Specific fields are filled
886 * in by read_inode() based on what's on the disk for a given i_no, or when the
887 * inode is created (for new objects).
889 * i_no is set by the caller. Note that this means this inode can be for an
890 * inode that is already on disk, or it can be used when creating. */
891 struct inode *get_inode(struct dentry *dentry)
893 struct super_block *sb = dentry->d_sb;
894 /* FS allocs and sets the following: i_op, i_fop, i_pm.pm_op, and any FS
896 struct inode *inode = sb->s_op->alloc_inode(sb);
901 TAILQ_INSERT_HEAD(&sb->s_inodes, inode, i_sb_list); /* weak inode ref */
902 TAILQ_INIT(&inode->i_dentry);
903 TAILQ_INSERT_TAIL(&inode->i_dentry, dentry, d_alias); /* weak dentry ref*/
904 /* one for the dentry->d_inode, one passed out */
905 kref_init(&inode->i_kref, inode_release, 2);
906 dentry->d_inode = inode;
907 inode->i_ino = 0; /* set by caller later */
908 inode->i_blksize = sb->s_blocksize;
909 spinlock_init(&inode->i_lock);
910 kref_get(&sb->s_kref, 1); /* could allow the dentry to pin it */
912 inode->i_rdev = 0; /* this has no real meaning yet */
913 inode->i_bdev = sb->s_bdev; /* storing an uncounted ref */
914 inode->i_state = 0; /* need real states, like I_NEW */
915 inode->dirtied_when = 0;
917 atomic_set(&inode->i_writecount, 0);
918 /* Set up the page_map structures. Default is to use the embedded one.
919 * Might push some of this back into specific FSs. For now, the FS tells us
920 * what pm_op they want via i_pm.pm_op, which we set again in pm_init() */
921 inode->i_mapping = &inode->i_pm;
922 pm_init(inode->i_mapping, inode->i_pm.pm_op, inode);
927 /* Helper: loads / reads in the inode numbered ino and attaches it to dentry */
927 void load_inode(struct dentry *dentry, unsigned long ino)
931 /* look it up in the inode cache first */
932 inode = icache_get(dentry->d_sb, ino);
934 /* connect the dentry to its inode */
935 TAILQ_INSERT_TAIL(&inode->i_dentry, dentry, d_alias);
936 dentry->d_inode = inode; /* storing the ref we got from icache_get */
939 /* otherwise, we need to do it manually */
940 inode = get_inode(dentry);
942 dentry->d_sb->s_op->read_inode(inode);
943 /* TODO: race here, two creators could miss in the cache, and then get here.
944 * need a way to sync across a blocking call. needs to be either at this
945 * point in the code or per the ino (dentries could be different) */
946 icache_put(dentry->d_sb, inode);
947 kref_put(&inode->i_kref);
950 /* Helper op, used when creating regular files, directories, symlinks, etc.
951 * Note we make a distinction between the mode and the file type (for now).
952 * After calling this, call the FS specific version (create or mkdir), which
953 * will set the i_ino, the filetype, and do any other FS-specific stuff. Also
954 * note that a lot of inode stuff was initialized in get_inode/alloc_inode. The
955 * stuff here is pertinent to the specific creator (user), mode, and time. Also
956 * note we don't pass this an nd, like Linux does... */
957 static struct inode *create_inode(struct dentry *dentry, int mode)
959 /* note it is the i_ino that uniquely identifies a file in the specific
960 * filesystem. there's a diff between creating an inode (even for an in-use
961 * ino) and then filling it in, and vs creating a brand new one.
962 * get_inode() sets it to 0, and it should be filled in later in an
963 * FS-specific manner. */
964 struct inode *inode = get_inode(dentry);
967 inode->i_mode = mode & S_PMASK; /* note that after this, we have no type */
971 inode->i_atime.tv_sec = 0; /* TODO: now! */
972 inode->i_ctime.tv_sec = 0;
973 inode->i_mtime.tv_sec = 0;
974 inode->i_atime.tv_nsec = 0; /* are these supposed to be the extra ns? */
975 inode->i_ctime.tv_nsec = 0;
976 inode->i_mtime.tv_nsec = 0;
977 inode->i_bdev = inode->i_sb->s_bdev;
978 /* when we have notions of users, do something here: */
984 /* Create a new disk inode in dir associated with dentry, with the given mode.
985 * Called when creating a regular file. dir is the directory/parent. dentry is
986 * the dentry of the inode we are creating. Note the lack of the nd... */
987 int create_file(struct inode *dir, struct dentry *dentry, int mode)
989 struct inode *new_file = create_inode(dentry, mode);
992 dir->i_op->create(dir, dentry, mode, 0);
993 icache_put(new_file->i_sb, new_file);
994 kref_put(&new_file->i_kref);
998 /* Creates a new inode for a directory associated with dentry in dir with the
1000 int create_dir(struct inode *dir, struct dentry *dentry, int mode)
1002 struct inode *new_dir = create_inode(dentry, mode);
1005 dir->i_op->mkdir(dir, dentry, mode);
1006 dir->i_nlink++; /* Directories get a hardlink for every child dir */
1007 /* Make sure my parent tracks me. This is okay, since no directory (dir)
1008 * can have more than one dentry */
1009 struct dentry *parent = TAILQ_FIRST(&dir->i_dentry);
1010 assert(parent && parent == TAILQ_LAST(&dir->i_dentry, dentry_tailq));
1011 /* parent dentry tracks dentry as a subdir, weak reference */
1012 TAILQ_INSERT_TAIL(&parent->d_subdirs, dentry, d_subdirs_link);
1013 icache_put(new_dir->i_sb, new_dir);
1014 kref_put(&new_dir->i_kref);
1018 /* Creates a new inode for a symlink associated with dentry in dir, containing
1019 * the symlink symname */
1020 int create_symlink(struct inode *dir, struct dentry *dentry,
1021 const char *symname, int mode)
1023 struct inode *new_sym = create_inode(dentry, mode);
1026 dir->i_op->symlink(dir, dentry, symname);
1027 icache_put(new_sym->i_sb, new_sym);
1028 kref_put(&new_sym->i_kref);
1032 /* Returns 0 if the given mode is acceptable for the inode, and an appropriate
1033 * error code if not. Needs to be written, based on some sensible rules, and
1034 * will also probably use 'current' */
1035 int check_perms(struct inode *inode, int access_mode)
1037 return 0; /* anything goes! */
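/* Note: as called from link_path_walk(), access_mode is currently the
 * nd->intent (LOOKUP_OPEN / LOOKUP_CREATE / LOOKUP_ACCESS), so a real policy
 * will need to map those intents onto i_mode bits for whoever 'current' is. */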
1040 /* Called after all external refs are gone to clean up the inode. Once this is
1041 * called, all dentries pointing here are already done (one of them triggered
1042 * this via kref_put()). */
1043 void inode_release(struct kref *kref)
1045 struct inode *inode = container_of(kref, struct inode, i_kref);
1046 TAILQ_REMOVE(&inode->i_sb->s_inodes, inode, i_sb_list);
1047 icache_remove(inode->i_sb, inode->i_ino);
1048 /* Might need to write back or delete the file/inode */
1049 if (inode->i_nlink) {
1050 if (inode->i_state & I_STATE_DIRTY)
1051 inode->i_sb->s_op->write_inode(inode, TRUE);
1053 inode->i_sb->s_op->delete_inode(inode);
1055 if (S_ISFIFO(inode->i_mode)) {
1056 page_decref(kva2page(inode->i_pipe->p_buf));
1057 kfree(inode->i_pipe);
1060 // kref_put(inode->i_bdev->kref); /* assuming it's a bdev, could be a pipe*/
1061 /* Either way, we dealloc the in-memory version */
1062 inode->i_sb->s_op->dealloc_inode(inode); /* FS-specific clean-up */
1063 kref_put(&inode->i_sb->s_kref);
1064 /* TODO: clean this up */
1065 assert(inode->i_mapping == &inode->i_pm);
1066 kmem_cache_free(inode_kcache, inode);
1069 /* Fills in kstat with the stat information for the inode */
1070 void stat_inode(struct inode *inode, struct kstat *kstat)
1072 kstat->st_dev = inode->i_sb->s_dev;
1073 kstat->st_ino = inode->i_ino;
1074 kstat->st_mode = inode->i_mode;
1075 kstat->st_nlink = inode->i_nlink;
1076 kstat->st_uid = inode->i_uid;
1077 kstat->st_gid = inode->i_gid;
1078 kstat->st_rdev = inode->i_rdev;
1079 kstat->st_size = inode->i_size;
1080 kstat->st_blksize = inode->i_blksize;
1081 kstat->st_blocks = inode->i_blocks;
1082 kstat->st_atime = inode->i_atime;
1083 kstat->st_mtime = inode->i_mtime;
1084 kstat->st_ctime = inode->i_ctime;
1087 /* Inode Cache management. In general, search on the ino, get a refcnt'd value
1088 * back. Remove does not give you a reference back - it should only be called
1089 * in inode_release(). */
1090 struct inode *icache_get(struct super_block *sb, unsigned long ino)
1092 /* This is the same style as in pid2proc, it's the "safely create a strong
1093 * reference from a weak one, so long as other strong ones exist" pattern */
1094 spin_lock(&sb->s_icache_lock);
1095 struct inode *inode = hashtable_search(sb->s_icache, (void*)ino);
1097 if (!kref_get_not_zero(&inode->i_kref, 1))
1099 spin_unlock(&sb->s_icache_lock);
1103 void icache_put(struct super_block *sb, struct inode *inode)
1105 spin_lock(&sb->s_icache_lock);
1106 /* there's a race in load_inode() that could trigger this */
1107 assert(!hashtable_search(sb->s_icache, (void*)inode->i_ino));
1108 hashtable_insert(sb->s_icache, (void*)inode->i_ino, inode);
1109 spin_unlock(&sb->s_icache_lock);
1112 struct inode *icache_remove(struct super_block *sb, unsigned long ino)
1114 struct inode *inode;
1115 /* Presumably these hashtable removals could be easier since callers
1116 * actually know who they are (same with the pid2proc hash) */
1117 spin_lock(&sb->s_icache_lock);
1118 inode = hashtable_remove(sb->s_icache, (void*)ino);
1119 spin_unlock(&sb->s_icache_lock);
1120 assert(inode && !kref_refcnt(&inode->i_kref));
1124 /* File functions */
1126 /* Read count bytes from the file into buf, starting at *offset, which is
1127 * increased accordingly, returning the number of bytes transferred. Most
1128 * filesystems will use this function for their f_op->read.
1129 * Note, this uses the page cache. */
1130 ssize_t generic_file_read(struct file *file, char *buf, size_t count,
1136 unsigned long first_idx, last_idx;
1140 /* Consider pushing some error checking higher in the VFS */
1143 if (*offset == file->f_dentry->d_inode->i_size)
1145 /* Make sure we don't go past the end of the file */
1146 if (*offset + count > file->f_dentry->d_inode->i_size) {
1147 count = file->f_dentry->d_inode->i_size - *offset;
1149 page_off = *offset & (PGSIZE - 1);
1150 first_idx = *offset >> PGSHIFT;
1151 last_idx = (*offset + count) >> PGSHIFT;
1152 buf_end = buf + count;
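/* Example of the index math, assuming 4 KiB pages: *offset = 5000 and count =
 * 6000 give page_off = 5000 & 4095 = 904, first_idx = 1, and last_idx =
 * 11000 >> 12 = 2, so the loop below touches file pages 1 and 2. */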
1153 /* For each file page, make sure it's in the page cache, then copy it out.
1154 * TODO: will probably need to consider concurrently truncated files here.*/
1155 for (int i = first_idx; i <= last_idx; i++) {
1156 error = pm_load_page(file->f_mapping, i, &page);
1157 assert(!error); /* TODO: handle ENOMEM and friends */
1158 copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
1159 /* TODO: (UMEM) think about this. if it's a user buffer, we're relying
1160 * on current to detect whose it is (which should work for async calls).
1161 * Also, need to propagate errors properly... Probably should do a
1162 * user_mem_check, then free, and also make a distinction between
1163 * when the kernel wants a read/write (TODO: KFOP) */
1165 memcpy_to_user(current, buf, page2kva(page) + page_off, copy_amt);
1167 memcpy(buf, page2kva(page) + page_off, copy_amt);
1171 pm_put_page(page); /* it's still in the cache, we just don't need it */
1173 assert(buf == buf_end);
1178 /* Write count bytes from buf to the file, starting at *offset, which is
1179 * increased accordingly, returning the number of bytes transferred. Most
1180 * filesystems will use this function for their f_op->write. Note, this uses
1183 * Changes don't get flushed to disc til there is an fsync, page cache eviction,
1184 * or other means of trying to writeback the pages. */
1185 ssize_t generic_file_write(struct file *file, const char *buf, size_t count,
1191 unsigned long first_idx, last_idx;
1193 const char *buf_end;
1195 /* Consider pushing some error checking higher in the VFS */
1198 /* Extend the file. Should put more checks in here, and maybe do this per
1199 * page in the for loop below. */
1200 if (*offset + count > file->f_dentry->d_inode->i_size)
1201 file->f_dentry->d_inode->i_size = *offset + count;
1202 page_off = *offset & (PGSIZE - 1);
1203 first_idx = *offset >> PGSHIFT;
1204 last_idx = (*offset + count) >> PGSHIFT;
1205 buf_end = buf + count;
1206 /* For each file page, make sure it's in the page cache, then write it.*/
1207 for (int i = first_idx; i <= last_idx; i++) {
1208 error = pm_load_page(file->f_mapping, i, &page);
1209 assert(!error); /* TODO: handle ENOMEM and friends */
1210 copy_amt = MIN(PGSIZE - page_off, buf_end - buf);
1211 /* TODO: (UMEM) (KFOP) think about this. if it's a user buffer, we're
1212 * relying on current to detect whose it is (which should work for async
1215 memcpy_from_user(current, page2kva(page) + page_off, buf, copy_amt);
1217 memcpy(page2kva(page) + page_off, buf, copy_amt);
1221 atomic_or(&page->pg_flags, PG_DIRTY);
1222 pm_put_page(page); /* it's still in the cache, we just don't need it */
1224 assert(buf == buf_end);
1229 /* Directories usually use this for their read method, which is the way glibc
1230 * currently expects us to do a readdir (short of doing linux's getdents). Will
1231 * probably need work, based on whatever real programs want. */
1232 ssize_t generic_dir_read(struct file *file, char *u_buf, size_t count,
1235 struct kdirent dir_r = {0}, *dirent = &dir_r;
1237 size_t amt_copied = 0;
1238 char *buf_end = u_buf + count;
1240 if (!S_ISDIR(file->f_dentry->d_inode->i_mode)) {
1246 /* start readdir from where it left off: */
1247 dirent->d_off = *offset;
1249 u_buf + sizeof(struct kdirent) <= buf_end;
1250 u_buf += sizeof(struct kdirent)) {
1251 /* TODO: UMEM/KFOP (pin the u_buf in the syscall, ditch the local copy,
1252 * get rid of this memcpy and reliance on current, etc). Might be
1253 * tricky with the dirent->d_off and trust issues */
1254 retval = file->f_op->readdir(file, dirent);
1259 /* Slight info exposure: could be extra crap after the name in the
1260 * dirent (like the name of a deleted file) */
1262 memcpy_to_user(current, u_buf, dirent, sizeof(struct kdirent));
1264 memcpy(u_buf, dirent, sizeof(struct kdirent));
1266 amt_copied += sizeof(struct kdirent);
1267 /* 0 signals end of directory */
1271 /* Next time read is called, we pick up where we left off */
1272 *offset = dirent->d_off; /* UMEM */
1273 /* important to tell them how much they got. they often keep going til they
1274 * get 0 back (in the case of ls). it's also how much has been read, but it
1275 * isn't how much the f_pos has moved (which is opaque to the VFS). */
1279 /* Opens the file, using permissions from current for lack of a better option.
1280 * It will attempt to create the file if it does not exist and O_CREAT is
1281 * specified. This will return 0 on failure, and set errno. TODO: There's some
1282 * stuff that we don't do, esp related to file truncation/creation. flags are for
1283 * opening, the mode is for creating. The flags related to how to create
1284 * (O_CREAT_FLAGS) are handled in this function, not in create_file().
1286 * It's tempting to split this into a do_file_create and a do_file_open, based
1287 * on the O_CREAT flag, but the O_CREAT flag can be ignored if the file exists
1288 * already and O_EXCL isn't specified. We could have open call create if it
1289 * fails, but for now we'll keep it as is. */
1290 struct file *do_file_open(char *path, int flags, int mode)
1292 struct file *file = 0;
1293 struct dentry *file_d;
1294 struct inode *parent_i;
1295 struct nameidata nd_r = {0}, *nd = &nd_r;
1298 /* The file might exist, lets try to just open it right away */
1299 nd->intent = LOOKUP_OPEN;
1300 error = path_lookup(path, LOOKUP_FOLLOW, nd);
1302 /* Still need to make sure we didn't want to O_EXCL create */
1303 if ((flags & O_CREAT) && (flags & O_EXCL)) {
1307 file_d = nd->dentry;
1308 kref_get(&file_d->d_kref, 1);
1311 /* So it didn't already exist, release the path from the previous lookup,
1312 * and then we try to create it. */
1314 /* get the parent, following links. This means you get the parent of the
1315 * final link (which may not be in 'path' in the first place). */
1316 nd->intent = LOOKUP_CREATE;
1317 error = path_lookup(path, LOOKUP_PARENT | LOOKUP_FOLLOW, nd);
1322 /* see if the target is there (shouldn't be), and handle accordingly */
1323 file_d = do_lookup(nd->dentry, nd->last.name);
1325 if (!(flags & O_CREAT)) {
1329 /* Create the inode/file. get a fresh dentry too: */
1330 file_d = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1333 parent_i = nd->dentry->d_inode;
1334 /* Note that the mode technically should only apply to future opens,
1335 * but we apply it immediately. */
1336 if (create_file(parent_i, file_d, mode)) /* sets errno */
1338 dcache_put(file_d->d_sb, file_d);
1339 } else { /* something already exists */
1340 /* this can happen due to concurrent access, but needs to be thought
1342 panic("File shouldn't be here!");
1343 if ((flags & O_CREAT) && (flags & O_EXCL)) {
1344 /* wanted to create, not open, bail out */
1350 /* now open the file (freshly created or if it already existed). At this
1351 * point, file_d is a refcnt'd dentry, regardless of which branch we took.*/
1352 if (flags & O_TRUNC) {
1353 file_d->d_inode->i_size = 0;
1354 /* TODO: probably should remove the garbage pages from the page map */
1356 file = dentry_open(file_d, flags); /* sets errno */
1357 /* Note the fall through to the exit paths. File is 0 by default and if
1358 * dentry_open fails. */
1360 kref_put(&file_d->d_kref);
1366 /* Path is the location of the symlink, sometimes called the "new path", and
1367 * symname is who we link to, sometimes called the "old path". */
1368 int do_symlink(char *path, const char *symname, int mode)
1370 struct dentry *sym_d;
1371 struct inode *parent_i;
1372 struct nameidata nd_r = {0}, *nd = &nd_r;
1376 nd->intent = LOOKUP_CREATE;
1377 /* get the parent, but don't follow links */
1378 error = path_lookup(path, LOOKUP_PARENT, nd);
1383 /* see if the target is already there, handle accordingly */
1384 sym_d = do_lookup(nd->dentry, nd->last.name);
1389 /* Doesn't already exist, let's try to make it: */
1390 sym_d = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1393 parent_i = nd->dentry->d_inode;
1394 if (create_symlink(parent_i, sym_d, symname, mode))
1396 dcache_put(sym_d->d_sb, sym_d);
1397 retval = 0; /* Note the fall through to the exit paths */
1399 kref_put(&sym_d->d_kref);
1405 /* Makes a hard link for the file behind old_path to new_path */
1406 int do_link(char *old_path, char *new_path)
1408 struct dentry *link_d, *old_d;
1409 struct inode *inode, *parent_dir;
1410 struct nameidata nd_r = {0}, *nd = &nd_r;
1414 nd->intent = LOOKUP_CREATE;
1415 /* get the absolute parent of the new_path */
1416 error = path_lookup(new_path, LOOKUP_PARENT | LOOKUP_FOLLOW, nd);
1421 parent_dir = nd->dentry->d_inode;
1422 /* see if the new target is already there, handle accordingly */
1423 link_d = do_lookup(nd->dentry, nd->last.name);
1428 /* Doesn't already exist, let's try to make it. Still need to stitch it to
1429 * an inode and set its FS-specific stuff after this.*/
1430 link_d = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1433 /* Now let's get the old_path target */
1434 old_d = lookup_dentry(old_path, LOOKUP_FOLLOW);
1435 if (!old_d) /* errno set by lookup_dentry */
1437 /* For now, can only link to files */
1438 if (!S_ISREG(old_d->d_inode->i_mode)) {
1442 /* Must be on the same FS */
1443 if (old_d->d_sb != link_d->d_sb) {
1447 /* Do whatever FS specific stuff there is first (which is also a chance to
1449 error = parent_dir->i_op->link(old_d, parent_dir, link_d);
1454 /* Finally stitch it up */
1455 inode = old_d->d_inode;
1456 kref_get(&inode->i_kref, 1);
1457 link_d->d_inode = inode;
1459 TAILQ_INSERT_TAIL(&inode->i_dentry, link_d, d_alias); /* weak ref */
1460 dcache_put(link_d->d_sb, link_d);
1461 retval = 0; /* Note the fall through to the exit paths */
1463 kref_put(&old_d->d_kref);
1465 kref_put(&link_d->d_kref);
1471 /* Unlinks path from the directory tree. Read the Documentation for more info.
1473 int do_unlink(char *path)
1475 struct dentry *dentry;
1476 struct inode *parent_dir;
1477 struct nameidata nd_r = {0}, *nd = &nd_r;
1481 /* get the parent of the target, and don't follow a final link */
1482 error = path_lookup(path, LOOKUP_PARENT, nd);
1487 parent_dir = nd->dentry->d_inode;
1488 /* make sure the target is there */
1489 dentry = do_lookup(nd->dentry, nd->last.name);
1494 /* Make sure the target is not a directory */
1495 if (S_ISDIR(dentry->d_inode->i_mode)) {
1499 /* Remove the dentry from its parent */
1500 error = parent_dir->i_op->unlink(parent_dir, dentry);
1505 /* Now that our parent doesn't track us, we need to make sure we aren't
1506 * findable via the dentry cache. DYING, so we will be freed in
1507 * dentry_release() */
1508 dentry->d_flags |= DENTRY_DYING;
1509 dcache_remove(dentry->d_sb, dentry);
1510 dentry->d_inode->i_nlink--; /* TODO: race here, esp with a decref */
1511 /* At this point, the dentry is unlinked from the FS, and the inode has one
1512 * less link. When the in-memory objects (dentry, inode) are going to be
1513 * released (after all open files are closed, and maybe after entries are
1514 * evicted from the cache), then nlinks will get checked and the FS-file
1515 * will get removed from the disk */
1516 retval = 0; /* Note the fall through to the exit paths */
1518 kref_put(&dentry->d_kref);
1524 /* Checks to see if path can be accessed via mode. Need to actually send the
1525 * mode along somehow, so this doesn't do much now. This is an example of
1526 * decent error propagation from the lower levels via int retvals. */
1527 int do_access(char *path, int mode)
1529 struct nameidata nd_r = {0}, *nd = &nd_r;
1531 nd->intent = LOOKUP_ACCESS;
1532 retval = path_lookup(path, 0, nd);
1537 int do_chmod(char *path, int mode)
1539 struct nameidata nd_r = {0}, *nd = &nd_r;
1541 retval = path_lookup(path, 0, nd);
1544 /* TODO: when we have notions of uid, check for the proc's uid */
1545 if (nd->dentry->d_inode->i_uid != UID_OF_ME)
1549 nd->dentry->d_inode->i_mode = (nd->dentry->d_inode->i_mode & ~S_PMASK) | (mode & S_PMASK);
1555 /* Make a directory at path with mode. Returns -1 and sets errno on errors */
1556 int do_mkdir(char *path, int mode)
1558 struct dentry *dentry;
1559 struct inode *parent_i;
1560 struct nameidata nd_r = {0}, *nd = &nd_r;
1564 nd->intent = LOOKUP_CREATE;
1565 /* get the parent, but don't follow links */
1566 error = path_lookup(path, LOOKUP_PARENT, nd);
1571 /* see if the target is already there, handle accordingly */
1572 dentry = do_lookup(nd->dentry, nd->last.name);
1577 /* Doesn't already exist, let's try to make it: */
1578 dentry = get_dentry(nd->dentry->d_sb, nd->dentry, nd->last.name);
1581 parent_i = nd->dentry->d_inode;
1582 if (create_dir(parent_i, dentry, mode))
1584 dcache_put(dentry->d_sb, dentry);
1585 retval = 0; /* Note the fall through to the exit paths */
1587 kref_put(&dentry->d_kref);
1593 int do_rmdir(char *path)
1595 struct dentry *dentry;
1596 struct inode *parent_i;
1597 struct nameidata nd_r = {0}, *nd = &nd_r;
1601 /* get the parent, following links (probably want this), and we must get a
1602 * directory. Note, current versions of path_lookup can't handle both
1603 * PARENT and DIRECTORY, at least, it doesn't check that *path is a
1605 error = path_lookup(path, LOOKUP_PARENT | LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
1611 /* make sure the target is already there, handle accordingly */
1612 dentry = do_lookup(nd->dentry, nd->last.name);
1617 if (!S_ISDIR(dentry->d_inode->i_mode)) {
1621 if (dentry->d_mount_point) {
1625 /* TODO: make sure we aren't a mount point or a process's root (EBUSY) */
1626 /* Now for the removal. the FSs will check if they are empty */
1627 parent_i = nd->dentry->d_inode;
1628 error = parent_i->i_op->rmdir(parent_i, dentry);
1633 /* Now that our parent doesn't track us, we need to make sure we aren't
1634 * findable via the dentry cache. DYING, so we will be freed in
1635 * dentry_release() */
1636 dentry->d_flags |= DENTRY_DYING;
1637 dcache_remove(dentry->d_sb, dentry);
1638 /* Decref ourselves, so inode_release() knows we are done */
1639 dentry->d_inode->i_nlink--;
1640 TAILQ_REMOVE(&nd->dentry->d_subdirs, dentry, d_subdirs_link);
1641 parent_i->i_nlink--; /* TODO: race on this, esp since it's a decref */
1642 /* we still have d_parent and a kref on our parent, which will go away when
1643 * the in-memory dentry object goes away. */
1644 retval = 0; /* Note the fall through to the exit paths */
1646 kref_put(&dentry->d_kref);
1652 /* Pipes: Doing a simple buffer with reader and writer offsets. Size is power
1653 * of two, so we can easily compute its status and whatnot. */
1655 #define PIPE_SZ (1 << PGSHIFT)
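/* Since PIPE_SZ is a power of two, an offset maps to a buffer index with a
 * simple mask: e.g. with 4 KiB pages, p_rd_off = 5000 lands on index
 * 5000 & 4095 = 904. The offsets themselves only ever increase; the __ring_*
 * helpers below compare the raw offsets. */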
1657 static size_t pipe_get_rd_idx(struct pipe_inode_info *pii)
1659 return pii->p_rd_off & (PIPE_SZ - 1);
1662 static size_t pipe_get_wr_idx(struct pipe_inode_info *pii)
1665 return pii->p_wr_off & (PIPE_SZ - 1);
1668 static bool pipe_is_empty(struct pipe_inode_info *pii)
1670 return __ring_empty(pii->p_wr_off, pii->p_rd_off);
1673 static bool pipe_is_full(struct pipe_inode_info *pii)
1675 return __ring_full(PIPE_SZ, pii->p_wr_off, pii->p_rd_off);
1678 static size_t pipe_nr_full(struct pipe_inode_info *pii)
1680 return __ring_nr_full(pii->p_wr_off, pii->p_rd_off);
1683 static size_t pipe_nr_empty(struct pipe_inode_info *pii)
1685 return __ring_nr_empty(PIPE_SZ, pii->p_wr_off, pii->p_rd_off);
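/* A rough sanity-check sketch (hypothetical helper, not called anywhere),
 * assuming the __ring_* macros have the usual ring-buffer semantics: the full
 * and empty counts always account for the whole power-of-two buffer. */
static void example_pipe_sanity(struct pipe_inode_info *pii)
{
	assert(pipe_nr_full(pii) + pipe_nr_empty(pii) == PIPE_SZ);
	assert(pipe_is_empty(pii) == (pipe_nr_full(pii) == 0));
	assert(pipe_is_full(pii) == (pipe_nr_empty(pii) == 0));
}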
1688 ssize_t pipe_file_read(struct file *file, char *buf, size_t count,
1691 struct pipe_inode_info *pii = file->f_dentry->d_inode->i_pipe;
1692 size_t copy_amt, amt_copied = 0;
1694 cv_lock(&pii->p_cv);
1695 while (pipe_is_empty(pii)) {
1696 /* We wait til the pipe is drained before sending EOF if there are no
1697 * writers (instead of aborting immediately) */
1698 if (!pii->p_nr_writers) {
1699 cv_unlock(&pii->p_cv);
1702 if (file->f_flags & O_NONBLOCK) {
1703 cv_unlock(&pii->p_cv);
1707 cv_wait(&pii->p_cv);
1710 /* We might need to wrap-around with our copy, so we'll do the copy in two
1711 * passes. This will copy up to the end of the buffer, then on the next
1712 * pass will copy the rest to the beginning of the buffer (if necessary) */
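/* Worked example, assuming PIPE_SZ is 4096: with rd_idx = 4000 and 300 bytes
 * requested (and available), pass one copies the 96 bytes up to the end of
 * the buffer, the offsets and counts advance, and pass two copies the
 * remaining 204 bytes starting from index 0. */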
1713 for (int i = 0; i < 2; i++) {
1714 copy_amt = MIN(PIPE_SZ - pipe_get_rd_idx(pii),
1715 MIN(pipe_nr_full(pii), count));
1716 assert(current); /* shouldn't pipe from the kernel */
1717 memcpy_to_user(current, buf, pii->p_buf + pipe_get_rd_idx(pii),
1721 pii->p_rd_off += copy_amt;
1722 amt_copied += copy_amt;
1724 /* Just using one CV for both readers and writers. We should rarely have
1725 * multiple readers or writers. */
1727 __cv_broadcast(&pii->p_cv);
1728 cv_unlock(&pii->p_cv);
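/* Worked example of the two-pass copy above (numbers purely illustrative):
 * with PIPE_SZ = 4096, a read index of 4090 and 10 bytes buffered, a 10-byte
 * read copies 6 bytes on the first pass (PIPE_SZ - rd_idx = 6, i.e. up to the
 * end of the page), which advances p_rd_off across the wrap point so rd_idx
 * becomes 0, then copies the remaining 4 bytes from the start of the page on
 * the second pass.  When nothing straddles the end of the page, the second
 * pass computes copy_amt = 0 and is a no-op. */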
1732 /* Note: we're not dealing with PIPE_BUF and minimum atomic chunks, unless it turns out we need them later. */
1734 ssize_t pipe_file_write(struct file *file, const char *buf, size_t count,
1737 struct pipe_inode_info *pii = file->f_dentry->d_inode->i_pipe;
1738 size_t copy_amt, amt_copied = 0;
1740 cv_lock(&pii->p_cv);
1741 /* Write aborts right away if there are no readers, regardless of pipe status. */
1743 if (!pii->p_nr_readers) {
1744 cv_unlock(&pii->p_cv);
1748 while (pipe_is_full(pii)) {
1749 if (file->f_flags & O_NONBLOCK) {
1750 cv_unlock(&pii->p_cv);
1754 cv_wait(&pii->p_cv);
1756 /* Still need to check in the loop, in case the last reader left while we slept. */
1758 if (!pii->p_nr_readers) {
1759 cv_unlock(&pii->p_cv);
1764 /* We might need to wrap around with our copy, so we'll do the copy in two
1765 * passes. This will copy up to the end of the buffer, then on the next
1766 * pass will copy the rest to the beginning of the buffer (if necessary) */
1767 for (int i = 0; i < 2; i++) {
1768 copy_amt = MIN(PIPE_SZ - pipe_get_wr_idx(pii),
1769 MIN(pipe_nr_empty(pii), count));
1770 assert(current); /* shouldn't pipe from the kernel */
1771 memcpy_from_user(current, pii->p_buf + pipe_get_wr_idx(pii), buf,
1775 pii->p_wr_off += copy_amt;
1776 amt_copied += copy_amt;
1778 /* Just using one CV for both readers and writers. We should rarely have
1779 * multiple readers or writers. */
1781 __cv_broadcast(&pii->p_cv);
1782 cv_unlock(&pii->p_cv);
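/* Both blocking loops above follow the usual condition-variable pattern:
 * recheck the condition after every cv_wait(), since being woken only means
 * "something changed" (a reader/writer came or went, data or space appeared),
 * not that your condition now holds.  Schematically (a sketch only;
 * condition_of_interest() is a stand-in, not a real helper): */
#if 0	/* illustrative sketch only */
	cv_lock(&pii->p_cv);
	while (!condition_of_interest(pii))	/* e.g. "not full" for a writer */
		cv_wait(&pii->p_cv);		/* sleeps; the CV lock is dropped while asleep */
	/* ... act on the condition while still holding the CV lock ... */
	__cv_broadcast(&pii->p_cv);		/* wake the other side if we changed state */
	cv_unlock(&pii->p_cv);
#endif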
1786 /* In open and release, we need to track the number of readers and writers,
1787 * which we can differentiate by the file flags. */
1788 int pipe_open(struct inode *inode, struct file *file)
1790 struct pipe_inode_info *pii = inode->i_pipe;
1791 cv_lock(&pii->p_cv);
1792 /* Ugliness due to not using flags for O_RDONLY and friends... */
1793 if (file->f_mode == S_IRUSR) {
1794 pii->p_nr_readers++;
1795 } else if (file->f_mode == S_IWUSR) {
1796 pii->p_nr_writers++;
1798 warn("Bad pipe file flags 0x%x\n", file->f_flags);
1800 cv_unlock(&pii->p_cv);
1804 int pipe_release(struct inode *inode, struct file *file)
1806 struct pipe_inode_info *pii = inode->i_pipe;
1807 cv_lock(&pii->p_cv);
1808 /* Ugliness due to not using flags for O_RDONLY and friends... */
1809 if (file->f_mode == S_IRUSR) {
1810 pii->p_nr_readers--;
1811 } else if (file->f_mode == S_IWUSR) {
1812 pii->p_nr_writers--;
1814 warn("Bad pipe file flags 0x%x\n", file->f_flags);
1816 /* need to wake up any sleeping readers/writers, since we might be done */
1817 __cv_broadcast(&pii->p_cv);
1818 cv_unlock(&pii->p_cv);
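/* Typical lifecycle tying the counts above to the read/write paths: opening
 * the read end bumps p_nr_readers, opening the write end bumps p_nr_writers.
 * When the last writer releases, the broadcast wakes any sleeping reader,
 * which drains whatever is buffered and then sees p_nr_writers == 0 on an
 * empty pipe (EOF).  Writers, conversely, bail out as soon as p_nr_readers
 * hits 0, whether they were about to write or were asleep waiting for space. */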
1822 struct file_operations pipe_f_op = {
1823 .read = pipe_file_read,
1824 .write = pipe_file_write,
1826 .release = pipe_release,
1830 void pipe_debug(struct file *f)
1832 struct pipe_inode_info *pii = f->f_dentry->d_inode->i_pipe;
1834 printk("PIPE %p\n", pii);
1835 printk("\trdoff %p\n", pii->p_rd_off);
1836 printk("\twroff %p\n", pii->p_wr_off);
1837 printk("\tnr_rds %d\n", pii->p_nr_readers);
1838 printk("\tnr_wrs %d\n", pii->p_nr_writers);
1839 printk("\tcv waiters %d\n", pii->p_cv.nr_waiters);
1843 /* General plan: get a dentry/inode to represent the pipe. We'll alloc it from
1844 * the default_ns SB, but won't actually link it anywhere. It'll only be held
1845  * alive by the krefs, until all the FDs are closed. */
1846 int do_pipe(struct file **pipe_files, int flags)
1848 struct dentry *pipe_d;
1849 struct inode *pipe_i;
1850 struct file *pipe_f_read, *pipe_f_write;
1851 struct super_block *def_sb = default_ns.root->mnt_sb;
1852 struct pipe_inode_info *pii;
1854 pipe_d = get_dentry(def_sb, 0, "pipe");
1857 pipe_d->d_op = &dummy_d_op;
1858 pipe_i = get_inode(pipe_d);
1860 goto error_post_dentry;
1861 /* Preemptively mark the dentry for deletion: we have an unlinked dentry
1862  * right off the bat, held in existence only by the kref chain (pipe_d is the ref). */
1863 pipe_d->d_flags |= DENTRY_DYING;
1864 /* pipe_d->d_inode still has one ref to pipe_i, keeping the inode alive */
1865 kref_put(&pipe_i->i_kref);
1866 /* init inode fields. note we're using the dummy ops for i_op and d_op */
1867 pipe_i->i_mode = S_IRWXU | S_IRWXG | S_IRWXO;
1868 SET_FTYPE(pipe_i->i_mode, __S_IFIFO); /* using type == FIFO */
1869 pipe_i->i_nlink = 1; /* one for the dentry */
1872 pipe_i->i_size = PGSIZE;
1873 pipe_i->i_blocks = 0;
1874 pipe_i->i_atime.tv_sec = 0;
1875 pipe_i->i_atime.tv_nsec = 0;
1876 pipe_i->i_mtime.tv_sec = 0;
1877 pipe_i->i_mtime.tv_nsec = 0;
1878 pipe_i->i_ctime.tv_sec = 0;
1879 pipe_i->i_ctime.tv_nsec = 0;
1880 pipe_i->i_fs_info = 0;
1881 pipe_i->i_op = &dummy_i_op;
1882 pipe_i->i_fop = &pipe_f_op;
1883 pipe_i->i_socket = FALSE;
1884 /* Actually build the pipe. We're using one page, hanging off the
1885  * pipe_inode_info struct. When we release the inode, we free the pipe memory as well. */
1887 pipe_i->i_pipe = kmalloc(sizeof(struct pipe_inode_info), KMALLOC_WAIT);
1888 pii = pipe_i->i_pipe;
1893 pii->p_buf = kpage_zalloc_addr();
1900 pii->p_nr_readers = 0;
1901 pii->p_nr_writers = 0;
1902 cv_init(&pii->p_cv); /* must do this before dentry_open / pipe_open */
1903 /* Now we have an inode for the pipe. We need two files for the read and
1904 * write ends of the pipe. */
1905 flags &= ~(O_ACCMODE); /* avoid user bugs */
1906 pipe_f_read = dentry_open(pipe_d, flags | O_RDONLY);
1909 pipe_f_write = dentry_open(pipe_d, flags | O_WRONLY);
1912 pipe_files[0] = pipe_f_read;
1913 pipe_files[1] = pipe_f_write;
1917 kref_put(&pipe_f_read->f_kref);
1919 page_decref(kva2page(pii->p_buf));
1921 kfree(pipe_i->i_pipe);
1923 /* We don't need to free the pipe_i; putting the dentry will free it */
1925 /* Note we only free the dentry on failure. */
1926 kref_put(&pipe_d->d_kref);
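/* A sketch of how a pipe(2)-style syscall might consume do_pipe(): the two
 * struct files come back holding one kref each for the caller, and
 * insert_file() grabs its own ref for the FD table, so the caller drops its
 * refs once the FDs are installed.  "p->open_files" stands in for however the
 * calling process reaches its files_struct, and error handling is
 * abbreviated.  Illustrative only: */
#if 0	/* illustrative sketch only */
	struct file *pipe_files[2];
	int fds[2];

	if (do_pipe(pipe_files, flags))
		return -1;
	fds[0] = insert_file(p->open_files, pipe_files[0], 0);
	fds[1] = insert_file(p->open_files, pipe_files[1], 0);
	/* the FD table holds its own refs now; drop the ones do_pipe gave us */
	kref_put(&pipe_files[0]->f_kref);
	kref_put(&pipe_files[1]->f_kref);
#endif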
1930 struct file *alloc_file(void)
1932 struct file *file = kmem_cache_alloc(file_kcache, 0);
1937 /* one for the ref passed out */
1938 kref_init(&file->f_kref, file_release, 1);
1942 /* Opens and returns the file specified by dentry */
1943 struct file *dentry_open(struct dentry *dentry, int flags)
1945 struct inode *inode;
1948 inode = dentry->d_inode;
1949 /* Do the mode first, since we can still error out. f_mode stores how the
1950 * OS file is open, which can be more restrictive than the i_mode */
1951 switch (flags & (O_RDONLY | O_WRONLY | O_RDWR)) {
1953 desired_mode = S_IRUSR;
1956 desired_mode = S_IWUSR;
1959 desired_mode = S_IRUSR | S_IWUSR;
1964 if (check_perms(inode, desired_mode))
1966 file = alloc_file();
1969 file->f_mode = desired_mode;
1970 /* Add to the list of all files of this SB */
1971 TAILQ_INSERT_TAIL(&inode->i_sb->s_files, file, f_list);
1972 kref_get(&dentry->d_kref, 1);
1973 file->f_dentry = dentry;
1974 kref_get(&inode->i_sb->s_mount->mnt_kref, 1);
1975 file->f_vfsmnt = inode->i_sb->s_mount; /* saving a ref to the vmnt...*/
1976 file->f_op = inode->i_fop;
1977 /* Don't store creation flags */
1978 file->f_flags = flags & ~O_CREAT_FLAGS;
1980 file->f_uid = inode->i_uid;
1981 file->f_gid = inode->i_gid;
1983 // struct event_poll_tailq f_ep_links;
1984 spinlock_init(&file->f_ep_lock);
1985 file->f_privdata = 0; /* prob overridden by the fs */
1986 file->f_mapping = inode->i_mapping;
1987 file->f_op->open(inode, file);
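/* Ref bookkeeping for the struct file handed back above: the file itself
 * starts with one kref for the caller (from alloc_file()), and it holds one
 * ref on the dentry and one on the vfsmount.  It is also put on the
 * superblock's s_files list, but that list is not refcounted; file_release()
 * below undoes all of this when the last kref drops. */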
1994 /* Closes a file, fsync, whatever else is necessary. Called when the kref hits
1995 * 0. Note that the file is not refcounted on the s_files list, nor is the
1996 * f_mapping refcounted (it is pinned by the i_mapping). */
1997 void file_release(struct kref *kref)
1999 struct file *file = container_of(kref, struct file, f_kref);
2001 struct super_block *sb = file->f_dentry->d_sb;
2002 spin_lock(&sb->s_lock);
2003 TAILQ_REMOVE(&sb->s_files, file, f_list);
2004 spin_unlock(&sb->s_lock);
2006 /* TODO: fsync (BLK). also, we may want to parallelize the blocking that
2007 * could happen in here (spawn kernel threads)... */
2008 file->f_op->release(file->f_dentry->d_inode, file);
2009 /* Clean up the other refs we hold */
2010 kref_put(&file->f_dentry->d_kref);
2011 kref_put(&file->f_vfsmnt->mnt_kref);
2012 kmem_cache_free(file_kcache, file);
2015 /* Process-related File management functions */
2017 /* Given any FD, get the appropriate file; returns 0 otherwise. */
2018 struct file *get_file_from_fd(struct files_struct *open_files, int file_desc)
2020 struct file *retval = 0;
2023 spin_lock(&open_files->lock);
2024 if (open_files->closed) {
2025 spin_unlock(&open_files->lock);
2028 if (file_desc < open_files->max_fdset) {
2029 if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
2030 /* while max_files and max_fdset might not line up, we should never
2031 * have a valid fdset higher than files */
2032 assert(file_desc < open_files->max_files);
2033 retval = open_files->fd[file_desc].fd_file;
2034 /* 9ns might be using this one, in which case file == 0 */
2036 kref_get(&retval->f_kref, 1);
2039 spin_unlock(&open_files->lock);
2043 /* 9ns: puts back an FD from the VFS-FD-space. */
2044 int put_fd(struct files_struct *open_files, int file_desc)
2046 if (file_desc < 0) {
2047 warn("Negative FD!\n");
2050 spin_lock(&open_files->lock);
2051 if (file_desc < open_files->max_fdset) {
2052 if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
2053 /* while max_files and max_fdset might not line up, we should never
2054 * have a valid fdset higher than files */
2055 assert(file_desc < open_files->max_files);
2056 CLR_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
2059 spin_unlock(&open_files->lock);
2063 /* Remove FD from the open files, if it was there, and return f. Currently,
2064  * this decrefs f, so the return value is not consumable or even usable. This
2065 * hasn't been thought through yet. */
2066 struct file *put_file_from_fd(struct files_struct *open_files, int file_desc)
2068 struct file *file = 0;
2071 spin_lock(&open_files->lock);
2072 if (file_desc < open_files->max_fdset) {
2073 if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc)) {
2074 /* while max_files and max_fdset might not line up, we should never
2075 * have a valid fdset higher than files */
2076 assert(file_desc < open_files->max_files);
2077 file = open_files->fd[file_desc].fd_file;
2078 open_files->fd[file_desc].fd_file = 0;
2080 kref_put(&file->f_kref);
2081 CLR_BITMASK_BIT(open_files->open_fds->fds_bits, file_desc);
2084 spin_unlock(&open_files->lock);
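/* Usage sketch for the FD lookups above: get_file_from_fd() returns the file
 * with a kref held for the caller (or 0 for a bad/9ns FD), so every
 * successful get must be paired with a kref_put().  put_file_from_fd() is
 * effectively a close: per its comment, its return value is already decreffed
 * and should only be treated as a found/not-found indicator.  Illustrative
 * only; "open_files" and "fd" are whatever the caller already has: */
#if 0	/* illustrative sketch only */
	struct file *file = get_file_from_fd(open_files, fd);

	if (!file)
		return -EBADF;			/* no VFS file behind this FD */
	/* ... read/write/stat via file ... */
	kref_put(&file->f_kref);		/* drop the ref the lookup gave us */
#endif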
2088 static int __get_fd(struct files_struct *open_files, int low_fd)
2091 if ((low_fd < 0) || (low_fd > NR_FILE_DESC_MAX))
2093 if (open_files->closed)
2094 return -EINVAL; /* won't matter, they are dying */
2095 for (int i = low_fd; i < open_files->max_fdset; i++) {
2096 if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, i))
2099 SET_BITMASK_BIT(open_files->open_fds->fds_bits, slot);
2100 assert(slot < open_files->max_files &&
2101 open_files->fd[slot].fd_file == 0);
2102 if (slot >= open_files->next_fd)
2103 open_files->next_fd = slot + 1;
2106 if (slot == -1) /* should expand the FD array and fd_set */
2107 warn("Ran out of file descriptors, deal with me!");
2111 /* Gets and claims a free FD, used by 9ns. < 0 == error. */
2112 int get_fd(struct files_struct *open_files, int low_fd)
2115 spin_lock(&open_files->lock);
2116 slot = __get_fd(open_files, low_fd);
2117 spin_unlock(&open_files->lock);
2121 /* Inserts the file in the files_struct, returning the corresponding new file
2122 * descriptor, or an error code. We start looking for open fds from low_fd. */
2123 int insert_file(struct files_struct *open_files, struct file *file, int low_fd)
2126 spin_lock(&open_files->lock);
2127 slot = __get_fd(open_files, low_fd);
2129 spin_unlock(&open_files->lock);
2132 assert(slot < open_files->max_files &&
2133 open_files->fd[slot].fd_file == 0);
2134 kref_get(&file->f_kref, 1);
2135 open_files->fd[slot].fd_file = file;
2136 open_files->fd[slot].fd_flags = 0;
2137 spin_unlock(&open_files->lock);
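/* Worked example for __get_fd()/insert_file(): with FDs 0, 1, 2 and 4 in use,
 * a request with low_fd = 0 claims FD 3 (the first clear bit at or above
 * low_fd), while a request with low_fd = 5 claims FD 5 even though 3 is free,
 * which is what "give me an FD no lower than N" callers want.  next_fd is
 * only ever advanced here, so it tracks one past the highest slot handed out,
 * not the lowest free FD. */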
2141 /* Closes all open files. Mostly just a "put" for all files. If cloexec, it
2142 * will only close files that are opened with O_CLOEXEC. */
2143 void close_all_files(struct files_struct *open_files, bool cloexec)
2146 spin_lock(&open_files->lock);
2147 if (open_files->closed) {
2148 spin_unlock(&open_files->lock);
2152 open_files->closed = TRUE;
2153 for (int i = 0; i < open_files->max_fdset; i++) {
2154 if (GET_BITMASK_BIT(open_files->open_fds->fds_bits, i)) {
2155 /* while max_files and max_fdset might not line up, we should never
2156 * have a valid fdset higher than files */
2157 assert(i < open_files->max_files);
2158 file = open_files->fd[i].fd_file;
2159 /* no file == 9ns uses the FD. they will deal with it */
2162 if (cloexec && !(open_files->fd[i].fd_flags & O_CLOEXEC))
2164 /* Actually close the file */
2165 open_files->fd[i].fd_file = 0;
2167 kref_put(&file->f_kref);
2168 CLR_BITMASK_BIT(open_files->open_fds->fds_bits, i);
2171 spin_unlock(&open_files->lock);
2174 /* Inserts all of the files from src into dst, used by sys_fork(). */
2175 void clone_files(struct files_struct *src, struct files_struct *dst)
2178 spin_lock(&src->lock);
2180 spin_unlock(&src->lock);
2183 spin_lock(&dst->lock);
2185 warn("Destination closed before it opened");
2186 spin_unlock(&dst->lock);
2187 spin_unlock(&src->lock);
2190 for (int i = 0; i < src->max_fdset; i++) {
2191 if (GET_BITMASK_BIT(src->open_fds->fds_bits, i)) {
2192 /* while max_files and max_fdset might not line up, we should never
2193 * have a valid fdset higher than files */
2194 assert(i < src->max_files);
2195 file = src->fd[i].fd_file;
2196 assert(i < dst->max_files && dst->fd[i].fd_file == 0);
2197 SET_BITMASK_BIT(dst->open_fds->fds_bits, i);
2198 dst->fd[i].fd_file = file;
2200 kref_get(&file->f_kref, 1);
2201 if (i >= dst->next_fd)
2202 dst->next_fd = i + 1;
2205 spin_unlock(&dst->lock);
2206 spin_unlock(&src->lock);
2209 /* Change the working directory of the given fs env (one per process, at this
2210  * point). Returns 0 on success, or -ERROR on failure. */
2211 int do_chdir(struct fs_struct *fs_env, char *path)
2213 struct nameidata nd_r = {0}, *nd = &nd_r;
2215 retval = path_lookup(path, LOOKUP_DIRECTORY, nd);
2217 /* nd->dentry is the place we want our PWD to be */
2218 kref_get(&nd->dentry->d_kref, 1);
2219 kref_put(&fs_env->pwd->d_kref);
2220 fs_env->pwd = nd->dentry;
2226 /* Returns a null-terminated string of up to length cwd_l containing the
2227  * absolute path of fs_env (up to fs_env's root). Be sure to kfree the char*
2228 * "kfree_this" when you are done with it. We do this since it's easier to
2229  * build this string going backwards. Note cwd_l is not a strlen; it's the absolute size of the buffer (the path plus the trailing '\0' must fit). */
2231 char *do_getcwd(struct fs_struct *fs_env, char **kfree_this, size_t cwd_l)
2233 struct dentry *dentry = fs_env->pwd;
2235 char *path_start, *kbuf;
2241 kbuf = kmalloc(cwd_l, 0);
2247 kbuf[cwd_l - 1] = '\0';
2248 kbuf[cwd_l - 2] = '/';
2249 /* for each dentry in the path, all the way back to the root of fs_env, we
2250 * grab the dentry name, push path_start back enough, and write in the name,
2251  * using /'s to terminate. We skip the root, since we don't want its
2252 * actual name, just "/", which is set before each loop. */
2253 path_start = kbuf + cwd_l - 2; /* the last byte written */
2254 while (dentry != fs_env->root) {
2255 link_len = dentry->d_name.len; /* this does not count the \0 */
2256 if (path_start - (link_len + 2) < kbuf) {
2261 path_start -= link_len + 1; /* the 1 is for the \0 */
2262 strncpy(path_start, dentry->d_name.name, link_len);
2265 dentry = dentry->d_parent;
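/* Usage sketch for do_getcwd(): the returned pointer aims into the middle of
 * the kmalloc'd buffer (the path is built right-to-left), so the caller frees
 * kfree_this, never the returned pointer.  "fs_env" here is whatever
 * fs_struct the caller cares about.  Illustrative only: */
#if 0	/* illustrative sketch only */
	char *kfree_this, *cwd;

	cwd = do_getcwd(fs_env, &kfree_this, PGSIZE);
	if (cwd) {
		printk("cwd: %s\n", cwd);
		kfree(kfree_this);	/* cwd points into this buffer */
	}
#endif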
2270 static void print_dir(struct dentry *dentry, char *buf, int depth)
2272 struct dentry *child_d;
2273 struct dirent next = {0};
2277 if (!S_ISDIR(dentry->d_inode->i_mode)) {
2278 warn("Thought this was only directories!!");
2281 /* Print this dentry */
2282 printk("%s%s/ nlink: %d\n", buf, dentry->d_name.name,
2283 dentry->d_inode->i_nlink);
2284 if (dentry->d_mount_point) {
2285 dentry = dentry->d_mounted_fs->mnt_root;
2289 /* Set buffer for our kids */
2291 dir = dentry_open(dentry, 0);
2293 panic("Filesystem seems inconsistent - unable to open a dir!");
2294 /* Process every child, recursing on directories */
2296 retval = dir->f_op->readdir(dir, &next);
2298 /* Skip .., ., and empty entries */
2299 if (!strcmp("..", next.d_name) || !strcmp(".", next.d_name) ||
2302 /* there is an entry, now get its dentry */
2303 child_d = do_lookup(dentry, next.d_name);
2305 panic("Inconsistent FS, dirent doesn't have a dentry!");
2306 /* Recurse for directories, or just print the name for others */
2307 switch (child_d->d_inode->i_mode & __S_IFMT) {
2309 print_dir(child_d, buf, depth + 1);
2312 printk("%s%s size(B): %d nlink: %d\n", buf, next.d_name,
2313 child_d->d_inode->i_size, child_d->d_inode->i_nlink);
2316 printk("%s%s -> %s\n", buf, next.d_name,
2317 child_d->d_inode->i_op->readlink(child_d));
2320 printk("%s%s (char device) nlink: %d\n", buf, next.d_name,
2321 child_d->d_inode->i_nlink);
2324 printk("%s%s (block device) nlink: %d\n", buf, next.d_name,
2325 child_d->d_inode->i_nlink);
2328 warn("Look around you! Unknown filetype!");
2330 kref_put(&child_d->d_kref);
2336 /* Reset buffer to the way it was */
2338 kref_put(&dir->f_kref);
2342 int ls_dash_r(char *path)
2344 struct nameidata nd_r = {0}, *nd = &nd_r;
2348 error = path_lookup(path, LOOKUP_ACCESS | LOOKUP_DIRECTORY, nd);
2353 print_dir(nd->dentry, buf, 0);
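/* Debugging helper: ls_dash_r() walks and prints the whole tree under the
 * given directory, recursing via print_dir() above.  It panics on FS
 * inconsistencies, so it is for kernel debugging only.  For example: */
#if 0	/* illustrative sketch only */
	ls_dash_r("/");
#endif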
2358 /* Dummy ops, to catch weird operations we weren't expecting */
2359 int dummy_create(struct inode *dir, struct dentry *dentry, int mode,
2360 struct nameidata *nd)
2362 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2366 struct dentry *dummy_lookup(struct inode *dir, struct dentry *dentry,
2367 struct nameidata *nd)
2369 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2373 int dummy_link(struct dentry *old_dentry, struct inode *dir,
2374 struct dentry *new_dentry)
2376 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2380 int dummy_unlink(struct inode *dir, struct dentry *dentry)
2382 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2386 int dummy_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2388 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2392 int dummy_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2394 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2398 int dummy_rmdir(struct inode *dir, struct dentry *dentry)
2400 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2404 int dummy_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
2406 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2410 int dummy_rename(struct inode *old_dir, struct dentry *old_dentry,
2411 struct inode *new_dir, struct dentry *new_dentry)
2413 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2417 char *dummy_readlink(struct dentry *dentry)
2419 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2423 void dummy_truncate(struct inode *inode)
2425 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2428 int dummy_permission(struct inode *inode, int mode, struct nameidata *nd)
2430 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2434 int dummy_d_revalidate(struct dentry *dir, struct nameidata *nd)
2436 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2440 int dummy_d_hash(struct dentry *dentry, struct qstr *name)
2442 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2446 int dummy_d_compare(struct dentry *dir, struct qstr *name1, struct qstr *name2)
2448 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2452 int dummy_d_delete(struct dentry *dentry)
2454 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2458 int dummy_d_release(struct dentry *dentry)
2460 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2464 void dummy_d_iput(struct dentry *dentry, struct inode *inode)
2466 printk("Dummy VFS function %s called!\n", __FUNCTION__);
2469 struct inode_operations dummy_i_op = {
2484 struct dentry_operations dummy_d_op = {