bypasscache for ukernel

Simple (read) bypass cache support for ukernel. It does not attempt
any buffer cache handling.

Change-Id: Ie75572da7efdb871a4ce807f02fbbb5bd7744c66
Reviewed-on: http://gerrit.openafs.org/5484
Tested-by: BuildBot <buildbot@rampaginggeek.com>
Reviewed-by: Derrick Brashear <shadow@dementix.org>
Derrick Brashear 2011-09-21 15:02:40 -04:00
parent 65bdade6d5
commit d1cf14d54e
6 changed files with 162 additions and 64 deletions
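The change adds a uafs_pread_nocache() entry point to libuafs. Below is a rough
editorial sketch (not part of the commit) of how an application might call it,
assuming the ukernel client has already been initialized and /afs mounted by the
usual libuafs startup code; the path and function name are placeholders, and
uafs_open() is assumed to have its usual (path, flags, mode) form. The prototypes
are repeated here only to keep the sketch self-contained.

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>

extern int uafs_open(char *path, int flags, int mode);
extern int uafs_pread_nocache(int fd, char *buf, int len, off_t offset);
extern int uafs_close(int fd);

int
read_first_block(void)
{
    char buf[4096];
    int fd, nbytes;

    /* placeholder path: any file reachable through the ukernel client */
    fd = uafs_open("/afs/example.org/public/README", O_RDONLY, 0);
    if (fd < 0) {
        perror("uafs_open");
        return -1;
    }

    /* read the first 4 KiB straight from the fileserver; nothing is
     * written to the local chunk cache */
    nbytes = uafs_pread_nocache(fd, buf, sizeof(buf), 0);
    if (nbytes < 0)
        perror("uafs_pread_nocache");

    uafs_close(fd);
    return nbytes;
}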

@@ -29,6 +29,7 @@
#include "afs/kauth.h"
#include "afs/kautils.h"
#include "afs/afsutil.h"
#include "afs/afs_bypasscache.h"
#include "rx/rx_globals.h"
#include "afsd/afsd.h"
@@ -2370,6 +2371,71 @@ uafs_read(int fd, char *buf, int len)
return retval;
}
int
uafs_pread_nocache(int fd, char *buf, int len, off_t offset)
{
int retval;
AFS_GLOCK();
retval = uafs_pread_nocache_r(fd, buf, len, offset);
AFS_GUNLOCK();
return retval;
}
int
uafs_pread_nocache_r(int fd, char *buf, int len, off_t offset)
{
int code;
struct iovec iov[1];
struct usr_vnode *fileP;
struct nocache_read_request *bparms;
struct usr_uio uio;
/*
* Make sure this is an open file
*/
fileP = afs_FileTable[fd];
if (fileP == NULL) {
errno = EBADF;
return -1;
}
/* these get freed in PrefetchNoCache, so... */
bparms = afs_osi_Alloc(sizeof(struct nocache_read_request));
bparms->areq = afs_osi_Alloc(sizeof(struct vrequest));
afs_InitReq(bparms->areq, get_user_struct()->u_cred);
bparms->auio = &uio;
bparms->offset = offset;
bparms->length = len;
/*
* set up the uio buffer
*/
iov[0].iov_base = buf;
iov[0].iov_len = len;
uio.uio_iov = &iov[0];
uio.uio_iovcnt = 1;
uio.uio_offset = offset;
uio.uio_segflg = 0;
uio.uio_fmode = FREAD;
uio.uio_resid = len;
/*
* do the read
*/
code = afs_PrefetchNoCache(VTOAFS(fileP), get_user_struct()->u_cred,
bparms);
if (code) {
errno = code;
return -1;
}
afs_FileOffsets[fd] = uio.uio_offset;
return (len - uio.uio_resid);
}
int
uafs_pread(int fd, char *buf, int len, off_t offset)
{

@@ -90,6 +90,8 @@ extern int uafs_pwrite_r(int fd, char *buf, int len, off_t offset);
extern int uafs_read(int fd, char *buf, int len);
extern int uafs_pread(int fd, char *buf, int leni, off_t offset);
extern int uafs_pread_r(int fd, char *buf, int len, off_t offset);
extern int uafs_pread_nocache(int fd, char *buf, int leni, off_t offset);
extern int uafs_pread_nocache_r(int fd, char *buf, int len, off_t offset);
extern int uafs_fsync(int fd);
extern int uafs_fsync_r(int fd);
extern int uafs_close(int fd);

@@ -129,10 +129,7 @@ struct sysname_info {
#define BOP_FETCH 1 /* parm1 is chunk to get */
#define BOP_STORE 2 /* parm1 is chunk to store */
#define BOP_PATH 3 /* parm1 is path, parm2 is chunk to fetch */
#if defined(AFS_CACHE_BYPASS)
#define BOP_FETCH_NOCACHE 4 /* parms are: vnode ptr, offset, segment ptr, addr, cred ptr */
#endif
#ifdef AFS_DARWIN_ENV
#define BOP_MOVE 5 /* ptr1 afs_uspc_param ptr2 sname ptr3 dname */
#endif
@@ -682,7 +679,6 @@ struct SimpleLocks {
/*... to be continued ... */
#if defined(AFS_CACHE_BYPASS)
/* vcache (file) cachingStates bits */
#define FCSDesireBypass 0x1 /* This file should bypass the cache */
#define FCSBypass 0x2 /* This file is currently NOT being cached */
@@ -696,7 +692,6 @@ struct SimpleLocks {
* lock vcache (it's already locked) */
#define TRANSSetManualBit 0x4 /* The Transition routine should set FCSManuallySet so that
* filename checking does not override pioctl requests */
#endif /* AFS_CACHE_BYPASS */
#define CPSIZE 2
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
@@ -893,14 +888,13 @@ struct vcache {
short flockCount; /* count of flock readers, or -1 if writer */
char mvstat; /* 0->normal, 1->mt pt, 2->root. */
#if defined(AFS_CACHE_BYPASS)
char cachingStates; /* Caching policies for this file */
afs_uint32 cachingTransitions; /* # of times file has flopped between caching and not */
char cachingStates; /* Caching policies for this file */
afs_uint32 cachingTransitions; /* # of times file has flopped between caching and not */
#if defined(AFS_LINUX24_ENV)
off_t next_seq_offset; /* Next sequential offset (used by prefetch/readahead) */
#else
off_t next_seq_blk_offset; /* accounted in blocks for Solaris & IRIX */
#endif
off_t next_seq_offset; /* Next sequential offset (used by prefetch/readahead) */
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI65_ENV)
off_t next_seq_blk_offset; /* accounted in blocks for Solaris & IRIX */
#endif
#if defined(AFS_SUN5_ENV)

@@ -60,9 +60,7 @@
#include <afsconfig.h>
#include "afs/param.h"
#if defined(AFS_CACHE_BYPASS) && defined(AFS_LINUX24_ENV)
#if defined(AFS_CACHE_BYPASS) || defined(UKERNEL)
#include "afs/afs_bypasscache.h"
/*
@@ -269,15 +267,60 @@ done:
* afs_PrefetchNoCache, all of the pages they've been passed need
* to be unlocked.
*/
#ifdef UKERNEL
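/* UKERNEL has no struct page: iov_base in the caller's uio points at a
 * flat user buffer, so the copy helpers below are plain memcpy()s and
 * also decrement uio_resid, letting the caller compute the bytes
 * transferred; page lock/unlock/release become no-ops. */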
typedef void * bypass_page_t;
#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
do { \
memcpy(((char *)pp) + pageoff, \
((char *)rxiov[iovno].iov_base) + iovoff, \
PAGE_CACHE_SIZE - pageoff); \
auio->uio_resid -= (PAGE_CACHE_SIZE - pageoff); \
} while(0)
#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
do { \
memcpy(((char *)pp) + pageoff, \
((char *)rxiov[iovno].iov_base) + iovoff, \
rxiov[iovno].iov_len - iovoff); \
auio->uio_resid -= (rxiov[iovno].iov_len - iovoff); \
} while(0)
#define unlock_and_release_pages(auio)
#define release_full_page(pp)
#else
typedef struct page * bypass_page_t;
#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
do { \
char *address; \
address = kmap_atomic(pp, KM_USER0); \
memcpy(address + pageoff, \
(char *)(rxiov[iovno].iov_base) + iovoff, \
PAGE_CACHE_SIZE - pageoff); \
kunmap_atomic(address, KM_USER0); \
} while(0)
#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
do { \
char *address; \
address = kmap_atomic(pp, KM_USER0); \
memcpy(address + pageoff, \
(char *)(rxiov[iovno].iov_base) + iovoff, \
rxiov[iovno].iov_len - iovoff); \
kunmap_atomic(address, KM_USER0); \
} while(0)
#define unlock_and_release_pages(auio) \
do { \
struct iovec *ciov; \
struct page *pp; \
bypass_page_t pp; \
afs_int32 iovmax; \
afs_int32 iovno = 0; \
ciov = auio->uio_iov; \
iovmax = auio->uio_iovcnt - 1; \
pp = (struct page*) ciov->iov_base; \
pp = (bypass_page_t) ciov->iov_base; \
while(1) { \
if (pp) { \
if (PageLocked(pp)) \
@@ -288,10 +331,24 @@ done:
if(iovno > iovmax) \
break; \
ciov = (auio->uio_iov + iovno); \
pp = (struct page*) ciov->iov_base; \
pp = (bypass_page_t) ciov->iov_base; \
} \
} while(0)
#define release_full_page(pp) \
do { \
/* this is appropriate when no caller intends to unlock \
* and release the page */ \
SetPageUptodate(pp); \
if(PageLocked(pp)) \
unlock_page(pp); \
else \
afs_warn("afs_NoCacheFetchProc: page not locked!\n"); \
put_page(pp); /* decrement refcount */ \
} while(0)
#endif
/* no-cache prefetch routine */
static afs_int32
afs_NoCacheFetchProc(struct rx_call *acall,
@@ -305,16 +362,15 @@ afs_NoCacheFetchProc(struct rx_call *acall,
int moredata, iovno, iovoff, iovmax, result, locked;
struct iovec *ciov;
struct iovec *rxiov;
int nio;
struct page *pp;
char *address;
int nio = 0;
bypass_page_t pp;
int curpage, bytes;
int pageoff;
rxiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
ciov = auio->uio_iov;
pp = (struct page*) ciov->iov_base;
pp = (bypass_page_t) ciov->iov_base;
iovmax = auio->uio_iovcnt - 1;
iovno = iovoff = result = 0;
@@ -364,7 +420,8 @@ afs_NoCacheFetchProc(struct rx_call *acall,
for (curpage = 0; curpage <= iovmax; curpage++) {
pageoff = 0;
while (pageoff < 4096) {
/* properly, this should track uio_resid, not a fixed page size! */
while (pageoff < PAGE_CACHE_SIZE) {
/* If no more iovs, issue new read. */
if (iovno >= nio) {
COND_GUNLOCK(locked);
@@ -384,41 +441,27 @@ afs_NoCacheFetchProc(struct rx_call *acall,
length -= bytes;
iovno = 0;
}
pp = (struct page *)auio->uio_iov[curpage].iov_base;
pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
if (pageoff + (rxiov[iovno].iov_len - iovoff) <= PAGE_CACHE_SIZE) {
/* Copy entire (or rest of) current iovec into current page */
if (pp) {
address = kmap_atomic(pp, KM_USER0);
memcpy(address + pageoff, rxiov[iovno].iov_base + iovoff,
rxiov[iovno].iov_len - iovoff);
kunmap_atomic(address, KM_USER0);
}
if (pp)
copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio);
pageoff += rxiov[iovno].iov_len - iovoff;
iovno++;
iovoff = 0;
} else {
/* Copy only what's needed to fill current page */
if (pp) {
address = kmap_atomic(pp, KM_USER0);
memcpy(address + pageoff, rxiov[iovno].iov_base + iovoff,
PAGE_CACHE_SIZE - pageoff);
kunmap_atomic(address, KM_USER0);
}
if (pp)
copy_page(pp, pageoff, rxiov, iovno, iovoff, auio);
iovoff += PAGE_CACHE_SIZE - pageoff;
pageoff = PAGE_CACHE_SIZE;
}
/* we filled a page, or this is the last page. conditionally release it */
if (pp && ((pageoff == PAGE_CACHE_SIZE && release_pages)
|| (length == 0 && iovno >= nio))) {
/* this is appropriate when no caller intends to unlock
* and release the page */
SetPageUptodate(pp);
if(PageLocked(pp))
unlock_page(pp);
else
afs_warn("afs_NoCacheFetchProc: page not locked!\n");
put_page(pp); /* decrement refcount */
}
|| (length == 0 && iovno >= nio)))
release_full_page(pp);
if (length == 0 && iovno >= nio)
goto done;
}
@@ -615,10 +658,12 @@ done:
osi_Free(areq, sizeof(struct vrequest));
osi_Free(tcallspec, sizeof(struct tlocal1));
osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
osi_Free(bparms, sizeof(struct nocache_read_request));
#ifndef UKERNEL
/* in UKERNEL, the "pages" are passed in */
osi_Free(auio, sizeof(struct uio));
osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
#endif
return code;
}
#endif /* AFS_CACHE_BYPASS && AFS_LINUX24_ENV */
#endif

@@ -61,8 +61,7 @@
#ifndef _AFS_BYPASSCACHE_H
#define _AFS_BYPASSCACHE_H
#if defined(AFS_CACHE_BYPASS)
#if defined(AFS_CACHE_BYPASS) || defined(UKERNEL)
#include <afsconfig.h>
#include "afs/param.h"
#include "afs/sysincludes.h"
@@ -71,8 +70,8 @@
#define AFS_CACHE_BYPASS_DISABLED -1
#ifdef UKERNEL
#ifndef PAGE_SIZE
#define PAGE_SIZE 1024 * sizeof(long) / 8
#ifndef PAGE_CACHE_SIZE
#define PAGE_CACHE_SIZE 4096
#endif
#endif
@@ -85,13 +84,7 @@ struct nocache_read_request {
u_offset_t offset;
struct seg *segment;
caddr_t address;
#elif defined(AFS_SGI_ENV)
/* SGI (of some vintage) */
int32 offset;
int32 rem;
int32 pmp; /* mmm */
int32 length;
#elif defined(AFS_LINUX24_ENV) || defined(AFS_USR_LINUX24_ENV)
#elif defined(AFS_LINUX24_ENV) || defined(UKERNEL)
/* The tested platform, as CITI impl. just packs ab->parms */
struct uio *auio;
struct vrequest *areq;
@@ -146,7 +139,5 @@
afs_PrefetchNoCache(struct vcache *avc, afs_ucred_t *acred,
struct nocache_read_request *bparms);
#endif /* AFS_CACHE_BYPASS */
#endif /* AFS_CACHE_BYPASS || UKERNEL */
#endif /* _AFS_BYPASSCACHE_H */

@@ -26,7 +26,7 @@
#if defined(AFS_CACHE_BYPASS)
#include "afs/afs_bypasscache.h"
#endif// defined(AFS_CACHE_BYPASS)
#endif /* AFS_CACHE_BYPASS */
/* background request queue size */
afs_lock_t afs_xbrs; /* lock for brs */
static int brsInit = 0;