/*
 * Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 * Copyright (c) by Scott McNab <sdm@fractalgraphics.com.au>
 *
 * Trident 4DWave-NX memory page allocation (TLB area)
 * The Trident chip can handle only 16 MB of memory at a time.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <asm/io.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/trident.h>
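
/*
 * Overview (a summary of the code below): the driver keeps one table entry
 * per SNDRV_TRIDENT_PAGE_SIZE (4 kB) chip page.  tlb.entries[] holds the
 * little-endian bus addresses handed to the hardware, while
 * tlb.shadow_entries[] keeps the matching kernel pointers so the driver can
 * get back to the buffer (see __tlb_to_ptr below).
 */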

/* The page argument of these two macros is a Trident page index (4096 bytes
 * per page), not the aligned-page unit used elsewhere in this file.
 */
#define __set_tlb_bus(trident,page,ptr,addr) \
	do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
	     (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
#define __tlb_to_ptr(trident,page) \
	(void*)((trident)->tlb.shadow_entries[page])
#define __tlb_to_addr(trident,page) \
	(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
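
/*
 * Worked example (illustrative only, little-endian host assumed):
 * __set_tlb_bus(trident, 3, ptr, 0x12345678) stores cpu_to_le32(0x12345000)
 * in tlb.entries[3] and ptr in tlb.shadow_entries[3];
 * __tlb_to_addr(trident, 3) then yields 0x12345000 and
 * __tlb_to_ptr(trident, 3) yields ptr again.
 */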

#if PAGE_SIZE == 4096
/* page size == SNDRV_TRIDENT_PAGE_SIZE */
#define ALIGN_PAGE_SIZE		PAGE_SIZE	/* minimum page size for allocation */
#define MAX_ALIGN_PAGES		SNDRV_TRIDENT_MAX_PAGES /* maximum aligned pages */
/* fill the TLB entry (or entries) corresponding to page with ptr */
#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
/* fill the TLB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << 12)
/* get buffer address from aligned page */
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, page)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page)	__tlb_to_addr(trident, page)

#elif PAGE_SIZE == 8192
/* page size == SNDRV_TRIDENT_PAGE_SIZE * 2 */
#define ALIGN_PAGE_SIZE		PAGE_SIZE
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / 2)
#define get_aligned_page(offset)	((offset) >> 13)
#define aligned_page_offset(page)	((page) << 13)
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) << 1)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)

/* fill TLB entries -- we need to fill two entries */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       unsigned long ptr, dma_addr_t addr)
{
	page <<= 1;
	__set_tlb_bus(trident, page, ptr, addr);
	__set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	page <<= 1;
	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
	__set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}

#else
/* arbitrary size */
#define UNIT_PAGES		(PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
#define ALIGN_PAGE_SIZE		(SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
/* Note: if SNDRV_TRIDENT_MAX_PAGES is not a multiple of UNIT_PAGES, the last
 * few blocks become unusable.  To use such blocks, you'd have to check the
 * validity of the accessed page in set_tlb_bus and set_silent_tlb, and
 * search_empty() would have to check it, too.
 */
#define get_aligned_page(offset)	((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) * UNIT_PAGES)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)

/* fill TLB entries -- UNIT_PAGES entries must be filled */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       unsigned long ptr, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_tlb_bus(trident, page, ptr, addr);
		ptr += SNDRV_TRIDENT_PAGE_SIZE;
		addr += SNDRV_TRIDENT_PAGE_SIZE;
	}
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}

#endif /* PAGE_SIZE */
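
/*
 * Example of the aligned-page arithmetic above (illustrative values only):
 * with 4 kB host pages, byte offset 0x3250 lies in aligned page 3, which is
 * also Trident page 3; with 8 kB host pages the same offset lies in aligned
 * page 1, which covers Trident pages 2 and 3.
 */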

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_trident *trident, int offset)
{
	char *ptr;
	ptr = page_to_ptr(trident, get_aligned_page(offset));
	ptr += offset % ALIGN_PAGE_SIZE;
	return (void*)ptr;
}

/* first and last (aligned) pages of memory block */
#define firstpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)

/*
 * search for an empty range of pages big enough to hold the given size
 */
static struct snd_util_memblk *
search_empty(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk, *prev;
	int page, psize;
	struct list_head *p;

	/* round the size up to whole aligned pages */
	psize = get_aligned_page(size + ALIGN_PAGE_SIZE - 1);
	prev = NULL;
	page = 0;
	/* walk the blocks in offset order; the gap in front of each block
	 * is a candidate (first fit)
	 */
	list_for_each(p, &hdr->block) {
		blk = list_entry(p, struct snd_util_memblk, list);
		if (page + psize <= firstpg(blk))
			goto __found_pages;
		page = lastpg(blk) + 1;
	}
	if (page + psize > MAX_ALIGN_PAGES)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
	if (blk == NULL)
		return NULL;
	blk->offset = aligned_page_offset(page);	/* set aligned offset */
	firstpg(blk) = page;
	lastpg(blk) = page + psize - 1;
	return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(unsigned long ptr)
{
	if (ptr & ~0x3fffffffUL) {
		snd_printk(KERN_ERR "max memory size is 1GB!!\n");
		return 0;
	}
	if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}
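
/*
 * For instance (illustrative values): is_valid_page(0x40000000) fails the
 * 1 GB limit above, and is_valid_page(0x12345678) fails the alignment check
 * because the address is not a multiple of SNDRV_TRIDENT_PAGE_SIZE.
 */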

/*
 * page allocation for DMA (Scatter-Gather version)
 */
static struct snd_util_memblk *
snd_trident_alloc_sg_pages(struct snd_trident *trident,
			   struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int idx, page;
	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);

	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
	hdr = trident->tlb.memhdr;
	snd_assert(hdr != NULL, return NULL);

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (lastpg(blk) - firstpg(blk) >= sgbuf->pages) {
		snd_printk(KERN_ERR "page calculation doesn't match: allocated pages = %d, trident = %d/%d\n",
			   sgbuf->pages, firstpg(blk), lastpg(blk));
		__snd_util_mem_free(hdr, blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	idx = 0;
	for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
		dma_addr_t addr = sgbuf->table[idx].addr;
		unsigned long ptr = (unsigned long)sgbuf->table[idx].buf;
		if (! is_valid_page(addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, ptr, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA (contiguous version)
 */
static struct snd_util_memblk *
snd_trident_alloc_cont_pages(struct snd_trident *trident,
			     struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	int page;
	struct snd_pcm_runtime *runtime = substream->runtime;
	dma_addr_t addr;
	unsigned long ptr;

	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes <= SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE, return NULL);
	hdr = trident->tlb.memhdr;
	snd_assert(hdr != NULL, return NULL);

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	addr = runtime->dma_addr;
	ptr = (unsigned long)runtime->dma_area;
	for (page = firstpg(blk); page <= lastpg(blk); page++,
	     ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
		if (! is_valid_page(addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, ptr, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_trident_alloc_pages(struct snd_trident *trident,
			struct snd_pcm_substream *substream)
{
	snd_assert(trident != NULL, return NULL);
	snd_assert(substream != NULL, return NULL);
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_SG)
		return snd_trident_alloc_sg_pages(trident, substream);
	else
		return snd_trident_alloc_cont_pages(trident, substream);
}
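
/*
 * Typical usage (a sketch of the expected callers, not a guarantee of their
 * exact code): a PCM hw_params callback allocates the DMA buffer and then
 * calls snd_trident_alloc_pages() to map it into the TLB; hw_free releases
 * the mapping again with snd_trident_free_pages() below.
 */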


/*
 * release DMA buffer from page table
 */
int snd_trident_free_pages(struct snd_trident *trident,
			   struct snd_util_memblk *blk)
{
	struct snd_util_memhdr *hdr;
	int page;

	snd_assert(trident != NULL, return -EINVAL);
	snd_assert(blk != NULL, return -EINVAL);

	hdr = trident->tlb.memhdr;
	mutex_lock(&hdr->block_mutex);
	/* reset TLB entries */
	for (page = firstpg(blk); page <= lastpg(blk); page++)
		set_silent_tlb(trident, page);
	/* free memory block */
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}


/*----------------------------------------------------------------
 * memory allocation using multiple pages (for synth)
 *----------------------------------------------------------------
 * Unlike the DMA allocation above, non-contiguous pages are
 * assigned to the TLB.
 *----------------------------------------------------------------*/

/*
 */
static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk);
static int synth_free_pages(struct snd_trident *hw, struct snd_util_memblk *blk);

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_trident_synth_alloc(struct snd_trident *hw, unsigned int size)
{
	struct snd_util_memblk *blk;
	struct snd_util_memhdr *hdr = hw->tlb.memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = __snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

EXPORT_SYMBOL(snd_trident_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_trident_synth_free(struct snd_trident *hw, struct snd_util_memblk *blk)
{
	struct snd_util_memhdr *hdr = hw->tlb.memhdr;

	mutex_lock(&hdr->block_mutex);
	synth_free_pages(hw, blk);
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_trident_synth_free);

/*
 * reset TLB entry and free kernel page
 */
static void clear_tlb(struct snd_trident *trident, int page)
{
	void *ptr = page_to_ptr(trident, page);
	dma_addr_t addr = page_to_addr(trident, page);
	set_silent_tlb(trident, page);
	if (ptr) {
		struct snd_dma_buffer dmab;
		dmab.dev.type = SNDRV_DMA_TYPE_DEV;
		dmab.dev.dev = snd_dma_pci_data(trident->pci);
		dmab.area = ptr;
		dmab.addr = addr;
		dmab.bytes = ALIGN_PAGE_SIZE;
		snd_dma_free_pages(&dmab);
	}
}

/* check how much of the new block really needs fresh pages;
 * adjacent blocks may already share the boundary pages, since synth
 * blocks need not start or end on aligned-page boundaries
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_util_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_util_memblk *q;
	int first_page, last_page;
	first_page = firstpg(blk);
	if ((p = blk->list.prev) != &hdr->block) {
		q = list_entry(p, struct snd_util_memblk, list);
		if (lastpg(q) == first_page)
			first_page++;	/* first page was already allocated */
	}
	last_page = lastpg(blk);
	if ((p = blk->list.next) != &hdr->block) {
		q = list_entry(p, struct snd_util_memblk, list);
		if (firstpg(q) == last_page)
			last_page--;	/* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}

/*
 * allocate kernel pages and assign them to TLB
 */
static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	firstpg(blk) = get_aligned_page(blk->offset);
	lastpg(blk) = get_aligned_page(blk->offset + blk->size - 1);
	get_single_page_range(hw->tlb.memhdr, blk, &first_page, &last_page);

	/* allocate a kernel page for each Trident page -
	 * fortunately the Trident page size and the kernel PAGE_SIZE are identical!
	 */
	for (page = first_page; page <= last_page; page++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(hw->pci),
					ALIGN_PAGE_SIZE, &dmab) < 0)
			goto __fail;
		if (! is_valid_page(dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		set_tlb_bus(hw, page, (unsigned long)dmab.area, dmab.addr);
	}
	return 0;

__fail:
	/* release the pages allocated so far */
	last_page = page - 1;
	for (page = first_page; page <= last_page; page++)
		clear_tlb(hw, page);

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_trident *trident, struct snd_util_memblk *blk)
{
	int page, first_page, last_page;

	get_single_page_range(trident->tlb.memhdr, blk, &first_page, &last_page);
	for (page = first_page; page <= last_page; page++)
		clear_tlb(trident, page);

	return 0;
}

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_trident_synth_copy_from_user(struct snd_trident *trident,
				     struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;

	offset += blk->offset;
	end_offset = offset + size;
	page = get_aligned_page(offset) + 1;
	/* the pages backing the block need not be contiguous in the kernel
	 * address space, so copy at most up to the next aligned-page
	 * boundary in each iteration
	 */
	do {
		nextofs = aligned_page_offset(page);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		if (copy_from_user(offset_ptr(trident, offset), data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_trident_synth_copy_from_user);