bypasscache: allow arbitrary sized iovecs

instead of binding ourselves to PAGE_SIZE, just fill an iovec of any size

Change-Id: I01385df372ac2bda6d8ea24381bf38102c12615f
Reviewed-on: http://gerrit.openafs.org/5658
Tested-by: Derrick Brashear <shadow@dementix.org>
Reviewed-by: Derrick Brashear <shadow@dementix.org>
commit 4b817cbb52 (parent 28a159b2bf)
Author: Derrick Brashear
Date:   2011-10-23 19:15:16 -04:00
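
The gist of the change, ahead of the diff: each copy now derives its length (dolen) from the iovec being filled instead of assuming PAGE_CACHE_SIZE. A minimal userspace sketch, assuming a hypothetical helper copy_into_iov that folds the two macros' length rules into one clamped copy (the real macros below split the two cases between copy_page and copy_pages):

    /* Minimal userspace sketch, not OpenAFS code: copy_into_iov is a
     * hypothetical helper. dolen is bounded by the destination iovec,
     * not by a compile-time page size. */
    #include <string.h>
    #include <sys/uio.h>

    static size_t
    copy_into_iov(struct iovec *dst, size_t dstoff,
                  const struct iovec *src, size_t srcoff)
    {
        size_t dolen = dst->iov_len - dstoff;   /* room left in destination */
        size_t avail = src->iov_len - srcoff;   /* bytes left in source */

        if (dolen > avail)
            dolen = avail;                      /* whichever side runs out first */
        memcpy((char *)dst->iov_base + dstoff,
               (const char *)src->iov_base + srcoff, dolen);
        return dolen;
    }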


@@ -270,44 +270,45 @@ done:
 #ifdef UKERNEL
 typedef void * bypass_page_t;
 
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
+#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
     do { \
+        int dolen = auio->uio_iov[curiov].iov_len - pageoff; \
         memcpy(((char *)pp) + pageoff, \
-               ((char *)rxiov[iovno].iov_base) + iovoff, \
-               PAGE_CACHE_SIZE - pageoff); \
-        auio->uio_resid -= (PAGE_CACHE_SIZE - pageoff); \
+               ((char *)rxiov[iovno].iov_base) + iovoff, dolen); \
+        auio->uio_resid -= dolen; \
     } while(0)
 
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
+#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
     do { \
+        int dolen = rxiov[iovno].iov_len - iovoff; \
         memcpy(((char *)pp) + pageoff, \
-               ((char *)rxiov[iovno].iov_base) + iovoff, \
-               rxiov[iovno].iov_len - iovoff); \
-        auio->uio_resid -= (rxiov[iovno].iov_len - iovoff); \
+               ((char *)rxiov[iovno].iov_base) + iovoff, dolen); \
+        auio->uio_resid -= dolen; \
     } while(0)
 
 #define unlock_and_release_pages(auio)
-#define release_full_page(pp)
+#define release_full_page(pp, pageoff)
 
 #else
 typedef struct page * bypass_page_t;
 
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
+#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
     do { \
         char *address; \
+        int dolen = auio->uio_iov[curiov].iov_len - pageoff; \
         address = kmap_atomic(pp, KM_USER0); \
         memcpy(address + pageoff, \
-               (char *)(rxiov[iovno].iov_base) + iovoff, \
-               PAGE_CACHE_SIZE - pageoff); \
+               (char *)(rxiov[iovno].iov_base) + iovoff, dolen); \
         kunmap_atomic(address, KM_USER0); \
     } while(0)
 
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
+#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
     do { \
         char *address; \
+        int dolen = rxiov[iovno].iov_len - iovoff; \
         address = kmap_atomic(pp, KM_USER0); \
         memcpy(address + pageoff, \
-               (char *)(rxiov[iovno].iov_base) + iovoff, \
-               rxiov[iovno].iov_len - iovoff); \
+               (char *)(rxiov[iovno].iov_base) + iovoff, dolen); \
         kunmap_atomic(address, KM_USER0); \
     } while(0)
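
Note how the two macros above divide the work: copy_pages consumes the remainder of the current rx iovec, while copy_page fills only what the current destination buffer still holds. A tiny runnable sketch of the selection predicate, under the hypothetical name pick_copy (the real branch lives in afs_NoCacheFetchProc, shown in the hunks below):

    /* Hypothetical illustration of the branch taken in afs_NoCacheFetchProc:
     * whichever side is exhausted first determines which macro runs. */
    #include <stddef.h>
    #include <stdio.h>

    static const char *
    pick_copy(size_t src_left, size_t dst_left)
    {
        /* mirrors: pageoff + (rxiov[iovno].iov_len - iovoff)
         *              <= auio->uio_iov[curpage].iov_len */
        return (src_left <= dst_left) ? "copy_pages" : "copy_page";
    }

    int
    main(void)
    {
        printf("%s\n", pick_copy(1000, 4096)); /* source drains first -> copy_pages */
        printf("%s\n", pick_copy(9000, 4096)); /* dest fills first    -> copy_page  */
        return 0;
    }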
@@ -335,7 +336,7 @@ typedef struct page * bypass_page_t;
         } \
     } while(0)
 
-#define release_full_page(pp) \
+#define release_full_page(pp, pageoff) \
     do { \
         /* this is appropriate when no caller intends to unlock \
          * and release the page */ \
@@ -421,7 +422,7 @@ afs_NoCacheFetchProc(struct rx_call *acall,
     for (curpage = 0; curpage <= iovmax; curpage++) {
         pageoff = 0;
         /* properly, this should track uio_resid, not a fixed page size! */
-        while (pageoff < PAGE_CACHE_SIZE) {
+        while (pageoff < auio->uio_iov[curpage].iov_len) {
             /* If no more iovs, issue new read. */
             if (iovno >= nio) {
                 COND_GUNLOCK(locked);
@@ -440,29 +441,30 @@ afs_NoCacheFetchProc(struct rx_call *acall,
                 goto done;
             }
             size -= bytes;
-            length -= bytes;
             iovno = 0;
         }
         pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
-        if (pageoff + (rxiov[iovno].iov_len - iovoff) <= PAGE_CACHE_SIZE) {
+        if (pageoff + (rxiov[iovno].iov_len - iovoff) <= auio->uio_iov[curpage].iov_len) {
             /* Copy entire (or rest of) current iovec into current page */
             if (pp)
-                copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio);
+                copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curpage);
+            length -= (rxiov[iovno].iov_len - iovoff);
             pageoff += rxiov[iovno].iov_len - iovoff;
             iovno++;
             iovoff = 0;
         } else {
             /* Copy only what's needed to fill current page */
             if (pp)
-                copy_page(pp, pageoff, rxiov, iovno, iovoff, auio);
-            iovoff += PAGE_CACHE_SIZE - pageoff;
-            pageoff = PAGE_CACHE_SIZE;
+                copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage);
+            length -= (auio->uio_iov[curpage].iov_len - pageoff);
+            iovoff += auio->uio_iov[curpage].iov_len - pageoff;
+            pageoff = auio->uio_iov[curpage].iov_len;
         }
         /* we filled a page, or this is the last page. conditionally release it */
-        if (pp && ((pageoff == PAGE_CACHE_SIZE && release_pages)
-                || (length == 0 && iovno >= nio)))
-            release_full_page(pp);
+        if (pp && ((pageoff == auio->uio_iov[curpage].iov_len &&
+                    release_pages) || (length == 0 && iovno >= nio)))
+            release_full_page(pp, pageoff);
         if (length == 0 && iovno >= nio)
             goto done;
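
Taken together, the rewritten loop walks the rx read iovecs into destination iovecs of arbitrary, differing sizes. A self-contained userspace sketch under those assumptions (fill_iovecs is a hypothetical stand-in; the real code reads from rx_Readv, tracks length and uio_resid, and releases pages as above):

    /* Standalone sketch of the fill loop, assuming the source iovecs stand
     * in for one rx_Readv result and every destination iovec may have a
     * different size. Not OpenAFS code. */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    static void
    fill_iovecs(struct iovec *dst, int ndst, const struct iovec *src, int nsrc)
    {
        int iovno = 0;
        size_t iovoff = 0;

        for (int curpage = 0; curpage < ndst && iovno < nsrc; curpage++) {
            size_t pageoff = 0;

            while (pageoff < dst[curpage].iov_len && iovno < nsrc) {
                if (pageoff + (src[iovno].iov_len - iovoff) <= dst[curpage].iov_len) {
                    /* rest of current source iovec fits: "copy_pages" case */
                    size_t dolen = src[iovno].iov_len - iovoff;
                    memcpy((char *)dst[curpage].iov_base + pageoff,
                           (const char *)src[iovno].iov_base + iovoff, dolen);
                    pageoff += dolen;
                    iovno++;
                    iovoff = 0;
                } else {
                    /* fill only what the destination still holds: "copy_page" case */
                    size_t dolen = dst[curpage].iov_len - pageoff;
                    memcpy((char *)dst[curpage].iov_base + pageoff,
                           (const char *)src[iovno].iov_base + iovoff, dolen);
                    iovoff += dolen;
                    pageoff = dst[curpage].iov_len;
                }
            }
        }
    }

    int
    main(void)
    {
        char in[10] = "abcdefghij", a[3], b[4], c[3];
        struct iovec src[2] = { { in, 6 }, { in + 6, 4 } };     /* 6 + 4 bytes */
        struct iovec dst[3] = { { a, 3 }, { b, 4 }, { c, 3 } }; /* 3 + 4 + 3   */

        fill_iovecs(dst, 3, src, 2);
        assert(memcmp(a, "abc", 3) == 0);
        assert(memcmp(b, "defg", 4) == 0);
        assert(memcmp(c, "hij", 3) == 0);
        puts("arbitrary sized iovecs filled correctly");
        return 0;
    }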