path: root/drivers/gpu/drm/xe/xe_sched_job_types.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_SCHED_JOB_TYPES_H_
#define _XE_SCHED_JOB_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

struct xe_exec_queue;

/**
 * struct xe_sched_job - XE schedule job (batch buffer tracking)
 */
struct xe_sched_job {
	/** @drm: base DRM scheduler job */
	struct drm_sched_job drm;
	/** @q: Exec queue */
	struct xe_exec_queue *q;
	/** @refcount: ref count of this job */
	struct kref refcount;
	/**
	 * @fence: DMA fence to indicate completion. One-way relationship: the
	 * job can safely reference the fence, but the fence cannot safely
	 * reference the job.
	 */
#define JOB_FLAG_SUBMIT		DMA_FENCE_FLAG_USER_BITS
	struct dma_fence *fence;
	/** @user_fence: value to write back when the batch buffer (BB) completes */
	struct {
		/** @user_fence.used: whether the user fence is used */
		bool used;
		/** @user_fence.addr: address to write to */
		u64 addr;
		/** @user_fence.value: write back value */
		u64 value;
	} user_fence;
	/** @migrate_flush_flags: Additional flush flags for migration jobs */
	u32 migrate_flush_flags;
	/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
	bool ring_ops_flush_tlb;
	/** @batch_addr: batch buffer addresses of the job */
	u64 batch_addr[];
};
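
/*
 * Illustrative sketch (not part of the driver): because @batch_addr is a
 * flexible array member, storage for a job tracking n_batches batch buffers
 * must be allocated with the addresses appended after the struct. The helper
 * name below is hypothetical and only shows the intended layout.
 */
static inline size_t xe_sched_job_example_alloc_size(unsigned int n_batches)
{
	/* sizeof(struct xe_sched_job) plus n_batches trailing u64 addresses */
	return sizeof(struct xe_sched_job) + n_batches * sizeof(u64);
}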

/**
 * struct xe_sched_job_snapshot - Snapshot of a job's batch buffer addresses
 */
struct xe_sched_job_snapshot {
	/** @batch_addr_len: number of entries in @batch_addr */
	u16 batch_addr_len;
	/** @batch_addr: batch buffer addresses captured from the job */
	u64 batch_addr[];
};

#endif