/* See COPYRIGHT for copyright information. */
+
+/** @file
+ * This file is responsible for managing physical pages as they
+ * are mapped into the page tables of a particular virtual address
+ * space. The functions defined in this file operate on these
+ * page tables to insert and remove physical pages from them at
+ * particular virtual addresses.
+ *
+ * @author Kevin Klues <klueska@cs.berkeley.edu>
+ * @author Barret Rhoden <brho@cs.berkeley.edu>
+ */
+
+#ifdef __SHARC__
+#pragma nosharc
+#endif
+
#ifdef __DEPUTY__
#pragma nodeputy
#endif
#include <ros/error.h>
+#include <kmalloc.h>
#include <atomic.h>
#include <string.h>
#include <assert.h>
#include <pmap.h>
#include <kclock.h>
#include <process.h>
+#include <stdio.h>
-//
-// Allocate n bytes of physical memory aligned on an
-// align-byte boundary. Align must be a power of two.
-// Return kernel virtual address. Returned memory is uninitialized.
-//
-// If we're out of memory, boot_alloc should panic.
-// This function may ONLY be used during initialization,
-// before the page_free_list has been set up.
-//
-void*
-boot_alloc(uint32_t n, uint32_t align)
-{
- extern char end[];
- void *v;
-
- // Initialize boot_freemem if this is the first time.
- // 'end' is a magic symbol automatically generated by the linker,
- // which points to the end of the kernel's bss segment -
- // i.e., the first virtual address that the linker
- // did _not_ assign to any kernel code or global variables.
- if (boot_freemem == 0)
- boot_freemem = end;
-
- // Step 1: round boot_freemem up to be aligned properly
- boot_freemem = ROUNDUP(boot_freemem, align);
-
- // Step 2: save current value of boot_freemem as allocated chunk
- v = boot_freemem;
- // Step 2.5: check if we can alloc
- if (PADDR(boot_freemem + n) > maxaddrpa)
- panic("Out of memory in boot alloc, you fool!\n");
- // Step 3: increase boot_freemem to record allocation
- boot_freemem += n;
- // Step 4: return allocated chunk
- return v;
-}
+/**
+ * @brief Global variable used to store erroneous virtual addresses as the
+ * result of a failed user_mem_check().
+ *
+ * zra: What if two checks fail at the same time? Maybe this should be per-cpu?
+ *
+ */
+static void *DANGEROUS RACY user_mem_check_addr;
-//
-// Initialize a Page structure.
-// The result has null links and 0 refcount.
-// Note that the corresponding physical page is NOT initialized!
-//
-static void
-page_initpp(page_t *pp)
-{
- memset(pp, 0, sizeof(*pp));
-}
+volatile uint32_t vpt_lock = 0;
+volatile uint32_t vpd_lock = 0;
-/*
- * Allocates a physical page.
- * Does NOT set the contents of the physical page to zero -
- * the caller must do that if necessary.
- *
- * *pp_store -- is set to point to the Page struct
- * of the newly allocated page
+/**
+ * @brief Initialize the array of physical pages and memory free list.
*
- * RETURNS
- * 0 -- on success
- * -ENOMEM -- otherwise
+ * The 'pages' array has one 'page_t' entry per physical page.
+ * Pages are reference counted, and free pages are kept on a linked list.
*/
-int page_alloc(page_t **pp_store)
+void page_init(void)
{
- if (LIST_EMPTY(&page_free_list))
- return -ENOMEM;
- *pp_store = LIST_FIRST(&page_free_list);
- LIST_REMOVE(*pp_store, pp_link);
- page_initpp(*pp_store);
- return 0;
+ /*
+ * First, make 'pages' point to an array of size 'npages' of
+ * type 'page_t'.
+ * The kernel uses this structure to keep track of physical pages;
+ * 'npages' equals the number of physical pages in memory.
+	 * The array itself is page-aligned (rounded up to the nearest page boundary).
+ */
+ pages = (page_t*)boot_alloc(npages*sizeof(page_t), PGSIZE);
+ memset(pages, 0, npages*sizeof(page_t));
+
+ /*
+	 * Then initialize everything so pages can start to be allocated and freed
+ * from the memory free list
+ */
+ page_alloc_init();
}
-/*
- * Allocates a specific physical page.
- * Does NOT set the contents of the physical page to zero -
- * the caller must do that if necessary.
+/**
+ * @brief Map the physical page 'pp' into the virtual address 'va' in page
+ * directory 'pgdir'
+ *
+ * Map the physical page 'pp' at virtual address 'va'.
+ * The permissions (the low 12 bits) of the page table
+ * entry should be set to 'perm|PTE_P'.
+ *
+ * Details:
+ * - If there is already a page mapped at 'va', it is page_remove()d.
+ * - If necessary, on demand, allocates a page table and inserts it into
+ * 'pgdir'.
+ * - page_incref() should be called if the insertion succeeds.
+ * - The TLB must be invalidated if a page was formerly present at 'va'.
+ * (this is handled in page_remove)
+ *
+ * No support for jumbos here. We will need to be careful when trying to
+ * insert regular pages into something that was already jumbo. We will
+ * also need to be careful with our overloading of the PTE_PS and
+ * PTE_PAT flags...
+ *
+ * @param[in] pgdir the page directory to insert the page into
+ * @param[in] pp a pointer to the page struct representing the
+ * physical page that should be inserted.
+ * @param[in] va the virtual address where the page should be
+ * inserted.
+ * @param[in] perm the permission bits with which to set up the
+ * virtual mapping.
*
- * *pp_store -- is set to point to the Page struct
- * of the newly allocated page
+ * @return ESUCCESS on success
+ * @return -ENOMEM if a page table could not be allocated
+ * into which the page should be inserted
*
- * RETURNS
- * 0 -- on success
- * -ENOMEM -- otherwise
*/
-int page_alloc_specific(page_t **pp_store, size_t ppn)
-{
- page_t* page = ppn2page(ppn);
- if( page->pp_ref != 0 )
- return -ENOMEM;
- *pp_store = page;
- LIST_REMOVE(*pp_store, pp_link);
- page_initpp(*pp_store);
- return 0;
-}
-
-int page_is_free(size_t ppn) {
- page_t* page = ppn2page(ppn);
- if( page->pp_ref == 0 )
- return TRUE;
- return FALSE;
-}
-
-//
-// Return a page to the free list.
-// (This function should only be called when pp->pp_ref reaches 0.)
-//
-void page_free(page_t *pp)
-{
- // this check allows us to call this on null ptrs, which helps when
- // allocating and checking for errors on several pages at once
- if (pp) {
- if (pp->pp_ref)
- panic("Attempting to free page with non-zero reference count!");
- LIST_INSERT_HEAD(&page_free_list, pp, pp_link);
- }
-}
-
-//
-// Decrement the reference count on a page,
-// freeing it if there are no more refs.
-//
-void
-page_decref(page_t *pp)
-{
- if (--pp->pp_ref == 0)
- page_free(pp);
-}
-
-//
-// Map the physical page 'pp' at virtual address 'va'.
-// The permissions (the low 12 bits) of the page table
-// entry should be set to 'perm|PTE_P'.
-//
-// Details
-// - If there is already a page mapped at 'va', it is page_remove()d.
-// - If necessary, on demand, allocates a page table and inserts it into
-// 'pgdir'.
-// - pp->pp_ref should be incremented if the insertion succeeds.
-// - The TLB must be invalidated if a page was formerly present at 'va'.
-// (this is handled in page_remove)
-//
-// RETURNS:
-// 0 on success
-// -ENOMEM, if page table couldn't be allocated
-//
-// Hint: The TA solution is implemented using pgdir_walk, page_remove,
-// and page2pa.
-//
-// No support for jumbos here. will need to be careful of trying to insert
-// regular pages into something that was already jumbo, and the overloading
-// of the PTE_PS and PTE_PAT flags...
-int
-page_insert(pde_t *pgdir, page_t *pp, void *va, int perm)
+int page_insert(pde_t *pgdir, page_t *pp, void *va, int perm)
{
pte_t* pte = pgdir_walk(pgdir, va, 1);
if (!pte)
// need to up the ref count in case pp is already mapped at va
// and we don't want to page_remove (which could free pp) and then
// continue as if pp wasn't freed. moral = up the ref asap
- pp->pp_ref++;
+ page_incref(pp);
if (*pte & PTE_P) {
page_remove(pgdir, va);
}
return 0;
}
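+
+/* A minimal usage sketch (assumes 'pgdir' is a valid page directory, 'pp' is
+ * a page the caller got from the allocator, and 'va' is a page-aligned user
+ * address):
+ *
+ *	if (page_insert(pgdir, pp, va, PTE_U | PTE_W) < 0)
+ *		return -ENOMEM;	// no page table could be allocated for 'va'
+ */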
-//
-// Map the physical page 'pp' at the first virtual address that is free
-// in the range 'vab' to 'vae'.
-// The permissions (the low 12 bits) of the page table entry get set to
-// 'perm|PTE_P'.
-//
-// Details
-// - If there is no free entry in the range 'vab' to 'vae' this
-// function returns -ENOMEM.
-// - If necessary, on demand, this function will allocate a page table
-// and inserts it into 'pgdir'.
-// - pp->pp_ref should be incremented if the insertion succeeds.
-//
-// RETURNS:
-// NULL, if no free va in the range (vab, vae) could be found
-// va, the virtual address where pp has been mapped in the
-// range (vab, vae)
-//
+/**
+ * @brief Map the physical page 'pp' at the first virtual address that is free
+ * in the range 'vab' to 'vae' in page directory 'pgdir'.
+ *
+ * The permissions (the low 12 bits) of the page table entry get set to
+ * 'perm|PTE_P'.
+ *
+ * Details:
+ * - If there is no free entry in the range 'vab' to 'vae' this
+ * function returns NULL.
+ * - If necessary, on demand, this function will allocate a page table
+ * and inserts it into 'pgdir'.
+ * - page_incref() will be called if the insertion succeeds.
+ *
+ * @param[in] pgdir the page directory to insert the page into
+ * @param[in] pp a pointer to the page struct representing the
+ * physical page that should be inserted.
+ * @param[in] vab the first virtual address in the range in which the
+ * page can be inserted.
+ * @param[in] vae the last virtual address in the range in which the
+ * page can be inserted.
+ * @param[in] perm the permission bits with which to set up the
+ * virtual mapping.
+ *
+ * @return VA the virtual address where pp has been mapped in the
+ * range (vab, vae)
+ * @return NULL no free va in the range (vab, vae) could be found
+ */
void* page_insert_in_range(pde_t *pgdir, page_t *pp,
void *vab, void *vae, int perm)
{
pte_t* pte = NULL;
- void* new_va;
+ void*SNT new_va;
for(new_va = vab; new_va <= vae; new_va+= PGSIZE) {
pte = pgdir_walk(pgdir, new_va, 1);
}
if (!pte) return NULL;
*pte = page2pa(pp) | PTE_P | perm;
- return new_va;
+ return TC(new_va); // trusted because mapping a page is like allocation
}
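+
+/* A minimal sketch of the range variant ('vab' and 'vae' stand for whatever
+ * window the caller wants searched):
+ *
+ *	void *va = page_insert_in_range(pgdir, pp, vab, vae, PTE_U);
+ *	if (va == NULL)
+ *		warn("no free PTE between vab and vae");
+ */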
-//
-// Return the page mapped at virtual address 'va'.
-// If pte_store is not zero, then we store in it the address
-// of the pte for this page. This is used by page_remove
-// but should not be used by other callers.
-//
-// Return 0 if there is no page mapped at va.
-//
-// Hint: the TA solution uses pgdir_walk and pa2page.
-//
-// For jumbos, right now this returns the first Page* in the 4MB
+/**
+ * @brief Return the page mapped at virtual address 'va' in
+ * page directory 'pgdir'.
+ *
+ * If pte_store is not NULL, then we store in it the address
+ * of the pte for this page. This is used by page_remove
+ * but should not be used by other callers.
+ *
+ * For jumbos, right now this returns the first Page* in the 4MB range
+ *
+ * @param[in] pgdir the page directory from which we should do the lookup
+ * @param[in] va the virtual address of the page we are looking up
+ * @param[out] pte_store the address of the page table entry for the returned page
+ *
+ * @return PAGE the page mapped at virtual address 'va'
+ * @return NULL No mapping exists at virtual address 'va'
+ */
page_t *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
pte_t* pte = pgdir_walk(pgdir, va, 0);
return pa2page(PTE_ADDR(*pte));
}
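+
+/* A minimal lookup sketch (ordinary callers pass NULL for pte_store; only
+ * page_remove should use that out-parameter):
+ *
+ *	page_t *page = page_lookup(pgdir, va, NULL);
+ *	if (!page)
+ *		return -EFAULT;	// nothing is mapped at 'va'
+ */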
-//
-// Unmaps the physical page at virtual address 'va'.
-// If there is no physical page at that address, silently does nothing.
-//
-// Details:
-// - The ref count on the physical page should decrement.
-// - The physical page should be freed if the refcount reaches 0.
-// - The pg table entry corresponding to 'va' should be set to 0.
-// (if such a PTE exists)
-// - The TLB must be invalidated if you remove an entry from
-// the pg dir/pg table.
-//
-// Hint: The TA solution is implemented using page_lookup,
-// tlb_invalidate, and page_decref.
-//
-// This may be wonky wrt Jumbo pages and decref.
-void
-page_remove(pde_t *pgdir, void *va)
+/**
+ * @brief Unmaps the physical page at virtual address 'va' in page directory
+ * 'pgdir'.
+ *
+ * If there is no physical page at that address, this function silently
+ * does nothing.
+ *
+ * Details:
+ * - The ref count on the physical page is decremented when the page is removed
+ * - The physical page is freed if the refcount reaches 0.
+ * - The pg table entry corresponding to 'va' is set to 0.
+ * (if such a PTE exists)
+ * - The TLB is invalidated if an entry is removed from the pg dir/pg table.
+ *
+ * This may be wonky wrt Jumbo pages and decref.
+ *
+ * @param pgdir the page directory from which the page should be removed
+ * @param va the virtual address at which the page we are trying to
+ * remove is mapped
+ */
+void page_remove(pde_t *pgdir, void *va)
{
pte_t* pte;
page_t *page;
page_decref(page);
}
-//
-// Invalidate a TLB entry, but only if the page tables being
-// edited are the ones currently in use by the processor.
-//
-// Need to sort this for cross core lovin' TODO
-void
-tlb_invalidate(pde_t *pgdir, void *va)
+/**
+ * @brief Invalidate a TLB entry, but only if the page tables being
+ * edited are the ones currently in use by the processor.
+ *
+ * TODO: Need to sort this for cross core lovin'
+ *
+ * @param pgdir the page directory associated with the tlb entry
+ * we are trying to invalidate
+ * @param va the virtual address associated with the tlb entry
+ * we are trying to invalidate
+ */
+void tlb_invalidate(pde_t *pgdir, void *va)
{
// Flush the entry only if we're modifying the current address space.
// For now, there is only one address space, so always invalidate.
invlpg(va);
}
-static void *DANGEROUS user_mem_check_addr;
-
-//
-// Check that an environment is allowed to access the range of memory
-// [va, va+len) with permissions 'perm | PTE_P'.
-// Normally 'perm' will contain PTE_U at least, but this is not required.
-// 'va' and 'len' need not be page-aligned; you must test every page that
-// contains any of that range. You will test either 'len/PGSIZE',
-// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
-//
-// A user program can access a virtual address if (1) the address is below
-// ULIM, and (2) the page table gives it permission. These are exactly
-// the tests you should implement here.
-//
-// If there is an error, set the 'user_mem_check_addr' variable to the first
-// erroneous virtual address.
-//
-// Returns 0 if the user program can access this range of addresses,
-// and -EFAULT otherwise.
-//
-// Hint: The TA solution uses pgdir_walk.
-//
-
-// zra: I've modified the interface to these two functions so that Ivy can
-// check that user pointers aren't dereferenced. User pointers get the
-// DANGEROUS qualifier. After validation, these functions return a
-// COUNT(len) pointer. user_mem_check now returns NULL on error instead of
-// -EFAULT.
-
-void *COUNT(len)
-user_mem_check(env_t *env, const void *DANGEROUS va, size_t len, int perm)
+/**
+ * @brief Check that an environment is allowed to access the range of memory
+ * [va, va+len) with permissions 'perm | PTE_P'.
+ *
+ * Normally 'perm' will contain PTE_U at least, but this is not required. The
+ * function get_va_perms only checks for PTE_U, PTE_W, and PTE_P. It won't
+ * check for things like PTE_PS, PTE_A, etc.
+ * 'va' and 'len' need not be page-aligned; every page that contains any part
+ * of the range is checked.
+ *
+ * A user program can access a virtual address if:
+ * -# the address is below ULIM
+ * -# the page table gives it permission.
+ *
+ * If there is an error, 'user_mem_check_addr' is set to the first
+ * erroneous virtual address.
+ *
+ * @param env the environment associated with the user program trying to access
+ * the virtual address range
+ * @param va the first virtual address in the range
+ * @param len the length of the virtual address range
+ * @param perm the permissions the user is trying to access the virtual address
+ * range with
+ *
+ * @return VA a pointer of type COUNT(len) to the address range
+ * @return NULL trying to access this range of virtual addresses is not allowed
+ */
+void* user_mem_check(env_t *env, const void *DANGEROUS va, size_t len, int perm)
{
+ if (len == 0) {
+ warn("Called user_mem_check with a len of 0. Don't do that. Returning NULL");
+ return NULL;
+ }
+
// TODO - will need to sort this out wrt page faulting / PTE_P
// also could be issues with sleeping and waking up to find pages
// are unmapped, though i think the lab ignores this since the
// kernel is uninterruptible
void *DANGEROUS start, *DANGEROUS end;
size_t num_pages, i;
- pte_t *pte;
+ int page_perms = 0;
perm |= PTE_P;
- start = ROUNDDOWN((void*)va, PGSIZE);
- end = ROUNDUP((void*)va + len, PGSIZE);
+ start = ROUNDDOWN((void*DANGEROUS)va, PGSIZE);
+ end = ROUNDUP((void*DANGEROUS)va + len, PGSIZE);
if (start >= end) {
warn("Blimey! Wrap around in VM range calculation!");
return NULL;
}
- num_pages = PPN(end - start);
+ num_pages = LA2PPN(end - start);
for (i = 0; i < num_pages; i++, start += PGSIZE) {
- pte = pgdir_walk(env->env_pgdir, start, 0);
+ page_perms = get_va_perms(env->env_pgdir, start);
// ensures the bits we want on are turned on. if not, error out
- if ( !pte || ((*pte & perm) != perm) ) {
- if (i = 0)
- user_mem_check_addr = (void*)va;
+ if ((page_perms & perm) != perm) {
+ if (i == 0)
+ user_mem_check_addr = (void*DANGEROUS)va;
else
user_mem_check_addr = start;
return NULL;
return (void *COUNT(len))TC(va);
}
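+
+/* A minimal sketch of guarding a user buffer before the kernel touches it
+ * ('buf' and 'count' stand in for arguments a syscall handler received from
+ * userspace):
+ *
+ *	void *kva = user_mem_check(env, buf, count, PTE_U);
+ *	if (kva == NULL)
+ *		return -EFAULT;	// user_mem_check_addr holds the first bad address
+ */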
-//
-// Checks that environment 'env' is allowed to access the range
-// of memory [va, va+len) with permissions 'perm | PTE_U'.
-// If it can, then the function simply returns.
-// If it cannot, 'env' is destroyed.
-//
-void *COUNT(len)
+/**
+ * @brief Use the kernel to copy a string from a buffer stored in userspace
+ * to a buffer stored elsewhere in the address space (potentially in
+ * memory only accessible by the kernel)
+ *
+ * @param env the environment associated with the user program from which
+ * the string is being copied
+ * @param dst the destination buffer into which the string
+ * is being copied
+ * @param va the start address of the buffer where the string resides
+ * @param len the size of the destination buffer (at most len-1 bytes are copied)
+ * @param perm the permissions with which the user is trying to access
+ * elements of the original buffer
+ *
+ * @return LEN the number of bytes copied into 'dst', not including the terminator
+ */
+size_t
+user_mem_strlcpy(env_t *env, char *_dst, const char *DANGEROUS va,
+ size_t _len, int perm)
+{
+ const char *DANGEROUS src = va;
+ size_t len = _len;
+ char *NT COUNT(_len-1) dst_in = _dst;
+ char *NT BND(_dst,_dst + _len - 1) dst = _dst;
+
+ if (len > 0) {
+ while (1) {
+ char *c;
+ // what if len was 1?
+ if (--len <= 0) break;
+ c = user_mem_check(env, src, 1, perm);
+ if (!c) break;
+ if (*c == '\0') break;
+ // TODO: ivy bitches about this
+ *dst++ = *c;
+ src++;
+ }
+ *dst = '\0';
+ }
+
+ return dst - dst_in;
+}
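+
+/* A minimal sketch of pulling a null-terminated string out of userspace
+ * (MAX_PATH_LEN and 'user_path' are placeholders, not names defined here):
+ *
+ *	char path[MAX_PATH_LEN];
+ *	size_t copied = user_mem_strlcpy(env, path, user_path, MAX_PATH_LEN, PTE_U);
+ *	// 'path' is now null-terminated; 'copied' excludes the terminator
+ */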
+
+/**
+ * @brief Checks that environment 'env' is allowed to access the range
+ * of memory [va, va+len) with permissions 'perm | PTE_U'. Destroy
+ * environment 'env' if the assertion fails.
+ *
+ * This function is identical to user_mem_check() except that it has the side
+ * effect of destroying the environment 'env' if the memory check fails.
+ *
+ * @param env the environment associated with the user program trying to access
+ * the virtual address range
+ * @param va the first virtual address in the range
+ * @param len the length of the virtual address range
+ * @param perm the permissions the user is trying to access the virtual address
+ * range with
+ *
+ * @return VA a pointer of type COUNT(len) to the address range
+ * @return NULL trying to access this range of virtual addresses is not
+ * allowed; environment 'env' has been destroyed
+ */
+void *
user_mem_assert(env_t *env, const void *DANGEROUS va, size_t len, int perm)
{
- void *COUNT(len) res = user_mem_check(env,va,len,perm | PTE_USER_RO);
+ if (len == 0) {
+ warn("Called user_mem_assert with a len of 0. Don't do that. Returning NULL");
+ return NULL;
+ }
+
+ void *COUNT(len) res = user_mem_check(env,va,len,perm | PTE_USER_RO);
if (!res) {
cprintf("[%08x] user_mem_check assertion failure for "
- "va %08x\n", env->env_id, user_mem_check_addr);
- env_destroy(env); // may not return
+ "va %08x\n", env->pid, user_mem_check_addr);
+ proc_destroy(env); // may not return
return NULL;
}
return res;
}
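+
+/* A minimal sketch for the assert variant: on failure the environment is
+ * destroyed, so a caller only keeps running if the check passed ('buf' and
+ * 'count' again stand in for syscall arguments):
+ *
+ *	char *kbuf = user_mem_assert(env, buf, count, PTE_U);
+ *	if (kbuf == NULL)
+ *		return -EFAULT;	// only reached if proc_destroy() came back
+ */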
-// copies data from a user buffer to a kernel buffer.
-// EFAULT if page not present, user lacks perms, or invalid addr.
-error_t
-memcpy_from_user(env_t* env, void* COUNT(len) dest,
+/**
+ * @brief Copies data from a user buffer to a kernel buffer.
+ *
+ * @param env the environment associated with the user program
+ * from which the buffer is being copied
+ * @param dest the destination address of the kernel buffer
+ * @param va the address of the userspace buffer from which we are copying
+ * @param len the length of the userspace buffer
+ *
+ * @return ESUCCESS on success
+ * @return -EFAULT the page associated with 'va' is not present, the user
+ * lacks the proper permissions, or there was an invalid 'va'
+ */
+error_t memcpy_from_user(env_t* env, void* COUNT(len) dest,
const void *DANGEROUS va, size_t len)
{
const void *DANGEROUS start, *DANGEROUS end;
if(start >= (void*SNT)ULIM || end >= (void*SNT)ULIM)
return -EFAULT;
- num_pages = PPN(end - start);
+ num_pages = LA2PPN(end - start);
for(i = 0; i < num_pages; i++)
{
pte = pgdir_walk(env->env_pgdir, start+i*PGSIZE, 0);
if(!pte || (*pte & perm) != perm)
return -EFAULT;
- void*COUNT(PGSIZE) kpage = KADDR(PTE_ADDR(pte));
- void* src_start = i > 0 ? kpage : kpage+(va-start);
+ void*COUNT(PGSIZE) kpage = KADDR(PTE_ADDR(*pte));
+ const void* src_start = i > 0 ? kpage : kpage+(va-start);
void* dst_start = dest+bytes_copied;
size_t copy_len = PGSIZE;
if(i == 0)
copy_len -= va-start;
if(i == num_pages-1)
- copy_len -= end-(start+len);
+ copy_len -= end-(va+len);
+
+ memcpy(dst_start,src_start,copy_len);
+ bytes_copied += copy_len;
+ }
+
+ assert(bytes_copied == len);
+
+ return ESUCCESS;
+}
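+
+/* A minimal sketch of copying a fixed-size argument block out of userspace
+ * ('struct args' and 'user_args' are placeholders for the caller's own types):
+ *
+ *	struct args kargs;
+ *	if (memcpy_from_user(env, &kargs, user_args, sizeof(kargs)) != ESUCCESS)
+ *		return -EFAULT;
+ */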
+
+/**
+ * @brief Copies data to a user buffer from a kernel buffer.
+ *
+ * @param env the environment associated with the user program
+ * to which the buffer is being copied
+ * @param va the destination address in the user's address space
+ * @param src the address of the kernel buffer from which we are copying
+ * @param len the number of bytes to copy
+ *
+ * @return ESUCCESS on success
+ * @return -EFAULT the page associated with 'va' is not present, the user
+ * lacks the proper permissions, or there was an invalid 'va'
+ */
+error_t memcpy_to_user(env_t* env, void*DANGEROUS va,
+ const void *COUNT(len) src, size_t len)
+{
+ const void *DANGEROUS start, *DANGEROUS end;
+ size_t num_pages, i;
+ pte_t *pte;
+ uintptr_t perm = PTE_P | PTE_USER_RW;
+ size_t bytes_copied = 0;
+
+ static_assert(ULIM % PGSIZE == 0 && ULIM != 0); // prevent wrap-around
+
+ start = ROUNDDOWN(va, PGSIZE);
+ end = ROUNDUP(va + len, PGSIZE);
+
+ if(start >= (void*SNT)ULIM || end >= (void*SNT)ULIM)
+ return -EFAULT;
+
+ num_pages = LA2PPN(end - start);
+ for(i = 0; i < num_pages; i++)
+ {
+ pte = pgdir_walk(env->env_pgdir, start+i*PGSIZE, 0);
+ if(!pte || (*pte & perm) != perm)
+ return -EFAULT;
+
+ void*COUNT(PGSIZE) kpage = KADDR(PTE_ADDR(*pte));
+ void* dst_start = i > 0 ? kpage : kpage+(va-start);
+ const void* src_start = src+bytes_copied;
+ size_t copy_len = PGSIZE;
+ if(i == 0)
+ copy_len -= va-start;
+ if(i == num_pages-1)
+ copy_len -= end-(va+len);
memcpy(dst_start,src_start,copy_len);
bytes_copied += copy_len;