drivers/gpu/drm/drm_vm.c

/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

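/**
 * Compute the page protection bits for an I/O map.
 *
 * \param map_type local map type (_DRM_REGISTERS, _DRM_AGP, ...).
 * \param vma virtual memory area.
 * \return page protection with architecture-specific caching attributes
 * applied: uncached on x86 for non-AGP maps, guarded on PowerPC register
 * maps, and write-combined on ia64 when EFI reports the range as WC.
 */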
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
    pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
    if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
        pgprot_val(tmp) |= _PAGE_PCD;
        pgprot_val(tmp) &= ~_PAGE_PWT;
    }
#elif defined(__powerpc__)
    pgprot_val(tmp) |= _PAGE_NO_CACHE;
    if (map_type == _DRM_REGISTERS)
        pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
    if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                    vma->vm_start))
        tmp = pgprot_writecombine(tmp);
    else
        tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
    tmp = pgprot_noncached(tmp);
#endif
    return tmp;
}

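/**
 * Compute the page protection bits for a DMA map.
 *
 * \param map_type local map type (presently unused here).
 * \param vma virtual memory area.
 * \return page protection, made uncached on non-cache-coherent PowerPC.
 */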
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
    pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
    pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
    return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success (with \p vmf->page set) or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct drm_file *priv = vma->vm_file->private_data;
    struct drm_device *dev = priv->minor->dev;
    struct drm_local_map *map = NULL;
    struct drm_map_list *r_list;
    struct drm_hash_item *hash;

    /*
     * Find the right map
     */
    if (!drm_core_has_AGP(dev))
        goto vm_fault_error;

    if (!dev->agp || !dev->agp->cant_use_aperture)
        goto vm_fault_error;

    if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
        goto vm_fault_error;

    r_list = drm_hash_entry(hash, struct drm_map_list, hash);
    map = r_list->map;

    if (map && map->type == _DRM_AGP) {
        /*
         * Using vm_pgoff as a selector forces us to use this unusual
         * addressing scheme.
         */
        resource_size_t offset = (unsigned long)vmf->virtual_address -
            vma->vm_start;
        resource_size_t baddr = map->offset + offset;
        struct drm_agp_mem *agpmem;
        struct page *page;

#ifdef __alpha__
        /*
         * Adjust to a bus-relative address
         */
        baddr -= dev->hose->mem_space->start;
#endif

        /*
         * It's AGP memory - find the real physical page to map
         */
        list_for_each_entry(agpmem, &dev->agp->memory, head) {
            if (agpmem->bound <= baddr &&
                agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                break;
        }

        if (&agpmem->head == &dev->agp->memory)
            goto vm_fault_error;

        /*
         * Get the page, inc the use count, and return it
         */
        offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
        page = agpmem->memory->pages[offset];
        get_page(page);
        vmf->page = page;

        DRM_DEBUG
            ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
             (unsigned long long)baddr,
             agpmem->memory->pages[offset],
             (unsigned long long)offset,
             page_count(page));
        return 0;
    }
vm_fault_error:
    return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    return VM_FAULT_SIGBUS;
}
#endif /* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success (with \p vmf->page set) or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct drm_local_map *map = vma->vm_private_data;
    unsigned long offset;
    unsigned long i;
    struct page *page;

    if (!map)
        return VM_FAULT_SIGBUS; /* Nothing allocated */

    offset = (unsigned long)vmf->virtual_address - vma->vm_start;
    i = (unsigned long)map->handle + offset;
    page = vmalloc_to_page((void *)i);
    if (!page)
        return VM_FAULT_SIGBUS;
    get_page(page);
    vmf->page = page;

    DRM_DEBUG("shm_fault 0x%lx\n", offset);
    return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
    struct drm_file *priv = vma->vm_file->private_data;
    struct drm_device *dev = priv->minor->dev;
    struct drm_vma_entry *pt, *temp;
    struct drm_local_map *map;
    struct drm_map_list *r_list;
    int found_maps = 0;

    DRM_DEBUG("0x%08lx,0x%08lx\n",
          vma->vm_start, vma->vm_end - vma->vm_start);
    atomic_dec(&dev->vma_count);

    map = vma->vm_private_data;

    mutex_lock(&dev->struct_mutex);
    list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
        if (pt->vma->vm_private_data == map)
            found_maps++;
        if (pt->vma == vma) {
            list_del(&pt->head);
            kfree(pt);
        }
    }

    /* We were the only map that was found */
    if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
        /* Check to see if we are in the maplist; if we are not,
         * then we delete this mapping's information.
         */
        found_maps = 0;
        list_for_each_entry(r_list, &dev->maplist, head) {
            if (r_list->map == map)
                found_maps++;
        }

        if (!found_maps) {
            drm_dma_handle_t dmah;

            switch (map->type) {
            case _DRM_REGISTERS:
            case _DRM_FRAME_BUFFER:
                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                    int retcode;
                    retcode = mtrr_del(map->mtrr,
                               map->offset,
                               map->size);
                    DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                iounmap(map->handle);
                break;
            case _DRM_SHM:
                vfree(map->handle);
                break;
            case _DRM_AGP:
            case _DRM_SCATTER_GATHER:
                break;
            case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
            case _DRM_GEM:
                DRM_ERROR("tried to rmmap GEM object\n");
                break;
            }
            kfree(map);
        }
    }
    mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success (with \p vmf->page set) or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct drm_file *priv = vma->vm_file->private_data;
    struct drm_device *dev = priv->minor->dev;
    struct drm_device_dma *dma = dev->dma;
    unsigned long offset;
    unsigned long page_nr;
    struct page *page;

    if (!dma)
        return VM_FAULT_SIGBUS; /* Error */
    if (!dma->pagelist)
        return VM_FAULT_SIGBUS; /* Nothing allocated */

    offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
    page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
    page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

    get_page(page);
    vmf->page = page;

    DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
    return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting virtual address.
 * \return zero on success (with \p vmf->page set) or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct drm_local_map *map = vma->vm_private_data;
    struct drm_file *priv = vma->vm_file->private_data;
    struct drm_device *dev = priv->minor->dev;
    struct drm_sg_mem *entry = dev->sg;
    unsigned long offset;
    unsigned long map_offset;
    unsigned long page_offset;
    struct page *page;

    if (!entry)
        return VM_FAULT_SIGBUS; /* Error */
    if (!entry->pagelist)
        return VM_FAULT_SIGBUS; /* Nothing allocated */

    offset = (unsigned long)vmf->virtual_address - vma->vm_start;
    map_offset = map->offset - (unsigned long)dev->sg->virtual;
    page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
    page = entry->pagelist[page_offset];
    get_page(page);
    vmf->page = page;

    return 0;
}

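/*
 * Thin wrappers so each vm_operations_struct below can supply its own
 * \c fault method while sharing the drm_do_vm_*_fault() implementations.
 */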
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
    .fault = drm_vm_fault,
    .open = drm_vm_open,
    .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
    .fault = drm_vm_shm_fault,
    .open = drm_vm_open,
    .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
    .fault = drm_vm_dma_fault,
    .open = drm_vm_open,
    .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
    .fault = drm_vm_sg_fault,
    .open = drm_vm_open,
    .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
        struct vm_area_struct *vma)
{
    struct drm_vma_entry *vma_entry;

    DRM_DEBUG("0x%08lx,0x%08lx\n",
          vma->vm_start, vma->vm_end - vma->vm_start);
    atomic_inc(&dev->vma_count);

    vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
    if (vma_entry) {
        vma_entry->vma = vma;
        vma_entry->pid = current->pid;
        list_add(&vma_entry->head, &dev->vmalist);
    }
}

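/**
 * Wrapper around drm_vm_open_locked() that takes drm_device::struct_mutex.
 */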
static void drm_vm_open(struct vm_area_struct *vma)
{
    struct drm_file *priv = vma->vm_file->private_data;
    struct drm_device *dev = priv->minor->dev;

    mutex_lock(&dev->struct_mutex);
    drm_vm_open_locked(dev, vma);
    mutex_unlock(&dev->struct_mutex);
}

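/**
 * Remove the \p vma private data entry from drm_device::vmalist and free it.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * The caller must hold drm_device::struct_mutex.
 */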
void drm_vm_close_locked(struct drm_device *dev,
        struct vm_area_struct *vma)
{
    struct drm_vma_entry *pt, *temp;

    DRM_DEBUG("0x%08lx,0x%08lx\n",
          vma->vm_start, vma->vm_end - vma->vm_start);
    atomic_dec(&dev->vma_count);

    list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
        if (pt->vma == vma) {
            list_del(&pt->head);
            kfree(pt);
            break;
        }
    }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
    struct drm_file *priv = vma->vm_file->private_data;
    struct drm_device *dev = priv->minor->dev;

    mutex_lock(&dev->struct_mutex);
    drm_vm_close_locked(dev, vma);
    mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops,
 * enforces read-only mappings when _DRM_DMA_USE_PCI_RO is set, and calls
 * drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *priv = filp->private_data;
    struct drm_device *dev;
    struct drm_device_dma *dma;
    unsigned long length = vma->vm_end - vma->vm_start;

    dev = priv->minor->dev;
    dma = dev->dma;
    DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
          vma->vm_start, vma->vm_end, vma->vm_pgoff);

    /* Length must match exact page count */
    if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
        return -EINVAL;
    }

    if (!capable(CAP_SYS_ADMIN) &&
        (dma->flags & _DRM_DMA_USE_PCI_RO)) {
        vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
        pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
        /* Ye gads this is ugly. With more thought
           we could move this up higher and use
           `protection_map' instead. */
        vma->vm_page_prot =
            __pgprot(pte_val
                 (pte_wrprotect
                  (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
    }

    vma->vm_ops = &drm_vm_dma_ops;

    vma->vm_flags |= VM_RESERVED; /* Don't swap */
    vma->vm_flags |= VM_DONTEXPAND;

    drm_vm_open_locked(dev, vma);
    return 0;
}

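/**
 * Get the offset that must be added to a map offset before remapping:
 * the bus-relative dense memory base on Alpha, zero elsewhere.
 */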
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
    return dev->hose->dense_mem_base;
#else
    return 0;
#endif
}

/**
 * mmap a DRM memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so call drm_mmap_dma(). Otherwise search for the map in
 * drm_device::map_hash, check that the restricted flag is not set, set the
 * virtual memory operations according to the map type and remap the pages.
 * Finally call drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *priv = filp->private_data;
    struct drm_device *dev = priv->minor->dev;
    struct drm_local_map *map = NULL;
    resource_size_t offset = 0;
    struct drm_hash_item *hash;

    DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
          vma->vm_start, vma->vm_end, vma->vm_pgoff);

    if (!priv->authenticated)
        return -EACCES;

    /* We check for "dma". On Apple's UniNorth, it's valid to have
     * the AGP mapped at physical address 0
     * --BenH.
     */
    if (!vma->vm_pgoff
#if __OS_HAS_AGP
        && (!dev->agp
        || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
        )
        return drm_mmap_dma(filp, vma);

    if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
        DRM_ERROR("Could not find map\n");
        return -EINVAL;
    }

    map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
    if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
        return -EPERM;

    /* Check for valid size. */
    if (map->size < vma->vm_end - vma->vm_start)
        return -EINVAL;

    if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
        vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
        pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
        /* Ye gads this is ugly. With more thought
           we could move this up higher and use
           `protection_map' instead. */
        vma->vm_page_prot =
            __pgprot(pte_val
                 (pte_wrprotect
                  (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
    }

    switch (map->type) {
#if !defined(__arm__)
    case _DRM_AGP:
        if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
            /*
             * On some platforms we can't talk to bus dma address from the CPU, so for
             * memory of type DRM_AGP, we'll deal with sorting out the real physical
             * pages and mappings in fault()
             */
#if defined(__powerpc__)
            pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
            vma->vm_ops = &drm_vm_ops;
            break;
        }
        /* fall through to _DRM_FRAME_BUFFER... */
#endif
    case _DRM_FRAME_BUFFER:
    case _DRM_REGISTERS:
        offset = drm_core_get_reg_ofs(dev);
        vma->vm_flags |= VM_IO; /* not in core dump */
        vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
        if (io_remap_pfn_range(vma, vma->vm_start,
                       (map->offset + offset) >> PAGE_SHIFT,
                       vma->vm_end - vma->vm_start,
                       vma->vm_page_prot))
            return -EAGAIN;
#else
        if (remap_pfn_range(vma, vma->vm_start,
                    (map->offset + offset) >> PAGE_SHIFT,
                    vma->vm_end - vma->vm_start,
                    vma->vm_page_prot))
            return -EAGAIN;
#endif

        DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
              " offset = 0x%llx\n",
              map->type,
              vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

        vma->vm_ops = &drm_vm_ops;
        break;
    case _DRM_CONSISTENT:
        /* Consistent memory is really like shared memory. But
         * it's allocated in a different way, so avoid fault */
        if (remap_pfn_range(vma, vma->vm_start,
            page_to_pfn(virt_to_page(map->handle)),
            vma->vm_end - vma->vm_start, vma->vm_page_prot))
            return -EAGAIN;
        vma->vm_page_prot = drm_dma_prot(map->type, vma);
    /* fall through to _DRM_SHM */
    case _DRM_SHM:
        vma->vm_ops = &drm_vm_shm_ops;
        vma->vm_private_data = (void *)map;
        /* Don't let this area swap. Change when
           DRM_KERNEL advisory is supported. */
        vma->vm_flags |= VM_RESERVED;
        break;
    case _DRM_SCATTER_GATHER:
        vma->vm_ops = &drm_vm_sg_ops;
        vma->vm_private_data = (void *)map;
        vma->vm_flags |= VM_RESERVED;
        vma->vm_page_prot = drm_dma_prot(map->type, vma);
        break;
    default:
        return -EINVAL; /* This should never happen. */
    }
    vma->vm_flags |= VM_RESERVED; /* Don't swap */
    vma->vm_flags |= VM_DONTEXPAND;

    drm_vm_open_locked(dev, vma);
    return 0;
}

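/**
 * mmap entry point for DRM device files.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Fails with -ENODEV if the device has been unplugged; otherwise takes
 * drm_device::struct_mutex and calls drm_mmap_locked().
 *
 * Illustrative userspace sequence for a legacy map-based driver (a sketch
 * only: error handling is omitted and the register base and size are
 * placeholders that would come from the driver):
 *
 * \code
 * struct drm_map map = {
 *     .offset = reg_base,                 // hypothetical physical base
 *     .size = reg_size,                   // hypothetical size
 *     .type = _DRM_REGISTERS,
 * };
 * ioctl(fd, DRM_IOCTL_ADD_MAP, &map);     // fills map.handle with the mmap token
 * void *regs = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, fd, (off_t)(unsigned long)map.handle);
 * \endcode
 */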
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *priv = filp->private_data;
    struct drm_device *dev = priv->minor->dev;
    int ret;

    if (drm_device_is_unplugged(dev))
        return -ENODEV;

    mutex_lock(&dev->struct_mutex);
    ret = drm_mmap_locked(filp, vma);
    mutex_unlock(&dev->struct_mutex);

    return ret;
}
EXPORT_SYMBOL(drm_mmap);