/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

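/*
 * amdgpu_job_timedout - timeout callback of the DRM GPU scheduler
 *
 * Called when a job has been running for longer than the scheduler timeout.
 * Tries a soft recovery (killing the hung wavefront) first and falls back
 * to a full GPU reset when that fails and recovery is enabled; if the
 * device was unplugged the job is simply aborted.
 */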
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		DRM_INFO("%s - device unplugged, skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		goto exit;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		r = amdgpu_device_gpu_recover_imp(ring->adev, job);
		if (r)
			DRM_ERROR("GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

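/*
 * amdgpu_job_alloc - allocate a job with space for @num_ibs indirect buffers
 *
 * The scheduler pointer is initialized to the first ring so that the job
 * always has a way back to the adev, even before it is submitted anywhere.
 */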
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

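/*
 * amdgpu_job_alloc_with_ib - allocate a job together with a single IB of
 * @size bytes from the given IB pool.
 */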
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

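/*
 * amdgpu_job_free_resources - free the job's IBs, keyed to the scheduler
 * fence when available so the IB memory is only reused once the job is done.
 */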
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	struct dma_fence *hw_fence;
	unsigned i;

	if (job->hw_fence.ops == NULL)
		hw_fence = job->external_hw_fence;
	else
		hw_fence = &job->hw_fence;

	/* Use the scheduler fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

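/*
 * amdgpu_job_free_cb - free_job callback of the DRM scheduler, drops the
 * job's syncs and its last reference once the scheduler is done with it.
 */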
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* Only put the hw fence if the job has an embedded fence;
	 * otherwise free the job directly.
	 */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

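/*
 * amdgpu_job_free - free a job directly, for submissions that bypassed the
 * scheduler (see amdgpu_job_submit_direct()).
 */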
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* Only put the hw fence if the job has an embedded fence;
	 * otherwise free the job directly.
	 */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

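/*
 * amdgpu_job_submit - hand the job over to the DRM GPU scheduler
 *
 * Arms the job, returns a reference to the scheduler's finished fence in @f
 * and pushes the job to @entity. A minimal usage sketch (error handling
 * omitted, @entity assumed to be an already initialized drm_sched_entity):
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
 *	... fill job->ibs[0] with command packets ...
 *	r = amdgpu_job_submit(job, entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 */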
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	drm_sched_job_arm(&job->base);

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

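/*
 * amdgpu_job_submit_direct - submit the job's IBs to @ring directly,
 * bypassing the scheduler; used in paths where going through the scheduler
 * is not possible, e.g. during initialization or reset.
 */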
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	/* record external_hw_fence for direct submit */
	job->external_hw_fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	dma_fence_put(*fence);

	return 0;
}

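/*
 * amdgpu_job_dependency - dependency callback of the DRM scheduler
 *
 * Returns the next fence the job has to wait for before it can run,
 * grabbing a VMID (and the fences protecting it) when the job needs one.
 */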
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync);
	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(&job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence (%d)\n", r);
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

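/*
 * amdgpu_job_run - run_job callback of the DRM scheduler
 *
 * Submits the job's IBs to the ring and returns the hardware fence. The
 * IBs are skipped with -ECANCELED when VRAM contents were lost since the
 * job was created.
 */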
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* Skip the IBs as well if VRAM was lost since the job was created */
	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	if (!job->job_run_counter)
		dma_fence_get(fence);
	else if (finished->error < 0)
		dma_fence_put(&job->hw_fence);
	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

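/* Recover the drm_sched_job from the spsc queue node it is embedded in */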
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

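/*
 * amdgpu_job_stop_all_jobs_on_sched - fail all jobs on @sched with
 * -EHWPOISON, both those still queued on entities and those already pushed
 * to the hardware; used when the device can no longer execute anything.
 */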
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};