habanalabs.h

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 *
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

#ifndef HABANALABS_H_
#define HABANALABS_H_

#include <linux/types.h>
#include <linux/ioctl.h>
/*
 * Defines that are asic-specific but constitute the ABI between the kernel
 * driver and userspace
 */
#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START	0x8000	/* 32KB */
/*
 * Queue Numbering
 *
 * The external queues (PCI DMA channels) MUST be before the internal queues
 * and each group (PCI DMA channels and internal) must be contiguous inside
 * itself but there can be a gap between the two groups (although not
 * recommended)
 */
enum goya_queue_id {
	GOYA_QUEUE_ID_DMA_0 = 0,
	GOYA_QUEUE_ID_DMA_1 = 1,
	GOYA_QUEUE_ID_DMA_2 = 2,
	GOYA_QUEUE_ID_DMA_3 = 3,
	GOYA_QUEUE_ID_DMA_4 = 4,
	GOYA_QUEUE_ID_CPU_PQ = 5,
	GOYA_QUEUE_ID_MME = 6,	/* Internal queues start here */
	GOYA_QUEUE_ID_TPC0 = 7,
	GOYA_QUEUE_ID_TPC1 = 8,
	GOYA_QUEUE_ID_TPC2 = 9,
	GOYA_QUEUE_ID_TPC3 = 10,
	GOYA_QUEUE_ID_TPC4 = 11,
	GOYA_QUEUE_ID_TPC5 = 12,
	GOYA_QUEUE_ID_TPC6 = 13,
	GOYA_QUEUE_ID_TPC7 = 14,
	GOYA_QUEUE_ID_SIZE
};
/*
 * Engine Numbering
 *
 * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
 */
enum goya_engine_id {
	GOYA_ENGINE_ID_DMA_0 = 0,
	GOYA_ENGINE_ID_DMA_1,
	GOYA_ENGINE_ID_DMA_2,
	GOYA_ENGINE_ID_DMA_3,
	GOYA_ENGINE_ID_DMA_4,
	GOYA_ENGINE_ID_MME_0,
	GOYA_ENGINE_ID_TPC_0,
	GOYA_ENGINE_ID_TPC_1,
	GOYA_ENGINE_ID_TPC_2,
	GOYA_ENGINE_ID_TPC_3,
	GOYA_ENGINE_ID_TPC_4,
	GOYA_ENGINE_ID_TPC_5,
	GOYA_ENGINE_ID_TPC_6,
	GOYA_ENGINE_ID_TPC_7,
	GOYA_ENGINE_ID_SIZE
};
enum hl_device_status {
	HL_DEVICE_STATUS_OPERATIONAL,
	HL_DEVICE_STATUS_IN_RESET,
	HL_DEVICE_STATUS_MALFUNCTION
};
/* Opcode for management ioctl
 *
 * HL_INFO_HW_IP_INFO          - Receive information about different IP blocks
 *                               in the device.
 * HL_INFO_HW_EVENTS           - Receive an array describing how many times
 *                               each event occurred since the last hard reset.
 * HL_INFO_DRAM_USAGE          - Retrieve the dram usage inside the device and
 *                               of the specific context. This is relevant only
 *                               for devices where the dram is managed by the
 *                               kernel driver
 * HL_INFO_HW_IDLE             - Retrieve information about the idle status of
 *                               each internal engine.
 * HL_INFO_DEVICE_STATUS       - Retrieve the device's status. This opcode
 *                               doesn't require an open context.
 * HL_INFO_DEVICE_UTILIZATION  - Retrieve the total utilization of the device
 *                               over the last period specified by the user.
 *                               The period can be between 100ms and 1s, in
 *                               resolution of 100ms. The return value is a
 *                               percentage of the utilization rate.
 * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times
 *                               each event occurred since the driver was
 *                               loaded.
 */
#define HL_INFO_HW_IP_INFO		0
#define HL_INFO_HW_EVENTS		1
#define HL_INFO_DRAM_USAGE		2
#define HL_INFO_HW_IDLE			3
#define HL_INFO_DEVICE_STATUS		4
#define HL_INFO_DEVICE_UTILIZATION	6
#define HL_INFO_HW_EVENTS_AGGREGATE	7

#define HL_INFO_VERSION_MAX_LEN		128
struct hl_info_hw_ip_info {
	__u64 sram_base_address;
	__u64 dram_base_address;
	__u64 dram_size;
	__u32 sram_size;
	__u32 num_of_events;
	__u32 device_id;	/* PCI Device ID */
	__u32 reserved[3];
	__u32 armcp_cpld_version;
	__u32 psoc_pci_pll_nr;
	__u32 psoc_pci_pll_nf;
	__u32 psoc_pci_pll_od;
	__u32 psoc_pci_pll_div_factor;
	__u8 tpc_enabled_mask;
	__u8 dram_enabled;
	__u8 pad[2];
	__u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
};
struct hl_info_dram_usage {
	__u64 dram_free_mem;
	__u64 ctx_dram_mem;
};

struct hl_info_hw_idle {
	__u32 is_idle;
	/*
	 * Bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'
	 * (see the usage sketch below this structure).
	 */
	__u32 busy_engines_mask;
};
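/*
 * Usage sketch (illustrative only, not part of the ABI): assuming "fd" is an
 * open file descriptor of the device node and that <sys/ioctl.h> and
 * <stdint.h> are included, the idle state can be queried with HL_IOCTL_INFO
 * (defined further below) and individual engines tested against the bits of
 * enum goya_engine_id. Error handling is omitted.
 *
 *	struct hl_info_hw_idle idle = {0};
 *	struct hl_info_args args = {
 *		.return_pointer = (__u64) (uintptr_t) &idle,
 *		.return_size = sizeof(idle),
 *		.op = HL_INFO_HW_IDLE,
 *	};
 *
 *	if (!ioctl(fd, HL_IOCTL_INFO, &args) && !idle.is_idle &&
 *	    (idle.busy_engines_mask & (1 << GOYA_ENGINE_ID_TPC_0)))
 *		; // TPC0 is currently busy
 */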
struct hl_info_device_status {
	__u32 status;
	__u32 pad;
};

struct hl_info_device_utilization {
	__u32 utilization;
	__u32 pad;
};
struct hl_info_args {
	/* Location of relevant struct in userspace */
	__u64 return_pointer;
	/*
	 * The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write.
	 *
	 * For the hw_events array, the size should be
	 * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
	 * (see the usage sketch below this structure)
	 */
	__u32 return_size;

	/* HL_INFO_* */
	__u32 op;

	union {
		/* Context ID - Currently not in use */
		__u32 ctx_id;
		/* Period value for utilization rate (100ms - 1000ms, in 100ms
		 * resolution)
		 */
		__u32 period_ms;
	};

	__u32 pad;
};
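/*
 * Usage sketch (illustrative only): sizing return_size for HL_INFO_HW_EVENTS
 * from a previously filled struct hl_info_hw_ip_info, as described above.
 * "fd" and "hw_ip" are assumptions of this sketch; <stdlib.h>, <stdint.h> and
 * <sys/ioctl.h> are assumed to be included. Error handling is omitted.
 *
 *	__u32 *events = calloc(hw_ip.num_of_events, sizeof(__u32));
 *	struct hl_info_args args = {
 *		.return_pointer = (__u64) (uintptr_t) events,
 *		.return_size = hw_ip.num_of_events * sizeof(__u32),
 *		.op = HL_INFO_HW_EVENTS,
 *	};
 *
 *	ioctl(fd, HL_IOCTL_INFO, &args);	// events[i] now holds the
 *						// count of event i
 */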
/* Opcode to create a new command buffer */
#define HL_CB_OP_CREATE		0
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY	1

struct hl_cb_in {
	/* Handle of CB or 0 if we want to create one */
	__u64 cb_handle;
	/* HL_CB_OP_* */
	__u32 op;
	/* Size of CB. Maximum size is 2MB. The minimum size that will be
	 * allocated, regardless of this parameter's value, is PAGE_SIZE
	 */
	__u32 cb_size;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

struct hl_cb_out {
	/* Handle of CB */
	__u64 cb_handle;
};

union hl_cb_args {
	struct hl_cb_in in;
	struct hl_cb_out out;
};
/*
 * This structure size must always be fixed to 64-bytes for backward
 * compatibility (a compile-time check is sketched below the structure)
 */
struct hl_cs_chunk {
	/*
	 * For an external queue, this represents the handle of a CB on the
	 * Host.
	 * For an internal queue, this represents an SRAM or DRAM address of
	 * the internal CB
	 */
	__u64 cb_handle;
	/* Index of queue to put the CB on */
	__u32 queue_index;
	/*
	 * Size of command buffer with valid packets
	 * Can be smaller than the actual CB size
	 */
	__u32 cb_size;
	/* HL_CS_CHUNK_FLAGS_* */
	__u32 cs_chunk_flags;
	/* Align structure to 64 bytes */
	__u32 pad[11];
};
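/*
 * The 64-byte requirement can be verified at compile time in userspace, e.g.
 * with a C11 static assertion (illustrative, not part of this header):
 *
 *	_Static_assert(sizeof(struct hl_cs_chunk) == 64,
 *		       "hl_cs_chunk must remain 64 bytes");
 */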
#define HL_CS_FLAGS_FORCE_RESTORE	0x1

#define HL_CS_STATUS_SUCCESS		0

struct hl_cs_in {
	/* this holds address of array of hl_cs_chunk for restore phase */
	__u64 chunks_restore;
	/* this holds address of array of hl_cs_chunk for execution phase */
	__u64 chunks_execute;
	/* this holds address of array of hl_cs_chunk for store phase -
	 * Currently not in use
	 */
	__u64 chunks_store;
	/* Number of chunks in restore phase array */
	__u32 num_chunks_restore;
	/* Number of chunks in execution array */
	__u32 num_chunks_execute;
	/* Number of chunks in store phase array - Currently not in use */
	__u32 num_chunks_store;
	/* HL_CS_FLAGS_* */
	__u32 cs_flags;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
};

struct hl_cs_out {
	/*
	 * seq holds the sequence number of the CS to pass to wait ioctl. All
	 * values are valid except for 0 and ULLONG_MAX
	 */
	__u64 seq;
	/* HL_CS_STATUS_* */
	__u32 status;
	__u32 pad;
};

union hl_cs_args {
	struct hl_cs_in in;
	struct hl_cs_out out;
};
struct hl_wait_cs_in {
	/* Command submission sequence number */
	__u64 seq;
	/* Absolute timeout to wait in microseconds */
	__u64 timeout_us;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

#define HL_WAIT_CS_STATUS_COMPLETED	0
#define HL_WAIT_CS_STATUS_BUSY		1
#define HL_WAIT_CS_STATUS_TIMEDOUT	2
#define HL_WAIT_CS_STATUS_ABORTED	3
#define HL_WAIT_CS_STATUS_INTERRUPTED	4

struct hl_wait_cs_out {
	/* HL_WAIT_CS_STATUS_* */
	__u32 status;
	__u32 pad;
};

union hl_wait_cs_args {
	struct hl_wait_cs_in in;
	struct hl_wait_cs_out out;
};
/* Opcode to alloc device memory */
#define HL_MEM_OP_ALLOC		0
/* Opcode to free previously allocated device memory */
#define HL_MEM_OP_FREE		1
/* Opcode to map host memory */
#define HL_MEM_OP_MAP		2
/* Opcode to unmap previously mapped host memory */
#define HL_MEM_OP_UNMAP		3

/* Memory flags */
#define HL_MEM_CONTIGUOUS	0x1
#define HL_MEM_SHARED		0x2
#define HL_MEM_USERPTR		0x4
struct hl_mem_in {
	union {
		/* HL_MEM_OP_ALLOC - allocate device memory */
		struct {
			/* Size to alloc */
			__u64 mem_size;
		} alloc;

		/* HL_MEM_OP_FREE - free device memory */
		struct {
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} free;

		/* HL_MEM_OP_MAP - map device memory */
		struct {
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure it got
			 * the hint address. Passing 0 here means that the
			 * driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} map_device;

		/* HL_MEM_OP_MAP - map host memory */
		struct {
			/* Address of allocated host memory */
			__u64 host_virt_addr;
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure it got
			 * the hint address. Passing 0 here means that the
			 * driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Size of allocated host memory */
			__u64 mem_size;
		} map_host;

		/* HL_MEM_OP_UNMAP - unmap host memory */
		struct {
			/* Virtual address returned from HL_MEM_OP_MAP */
			__u64 device_virt_addr;
		} unmap;
	};

	/* HL_MEM_OP_* */
	__u32 op;
	/* HL_MEM_* flags */
	__u32 flags;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};
struct hl_mem_out {
	union {
		/*
		 * Used for HL_MEM_OP_MAP as the virtual address that was
		 * assigned in the device VA space.
		 * A value of 0 means the requested operation failed.
		 */
		__u64 device_virt_addr;

		/*
		 * Used for HL_MEM_OP_ALLOC. This is the assigned
		 * handle for the allocated memory
		 */
		__u64 handle;
	};
};

union hl_mem_args {
	struct hl_mem_in in;
	struct hl_mem_out out;
};
#define HL_DEBUG_MAX_AUX_VALUES	10

struct hl_debug_params_etr {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;
	/* Size of buffer to allocate */
	__u64 buffer_size;
	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

struct hl_debug_params_etf {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;
	/* Size of buffer to allocate */
	__u64 buffer_size;
	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

struct hl_debug_params_stm {
	/* Two bit masks for HW event and Stimulus Port */
	__u64 he_mask;
	__u64 sp_mask;
	/* Trace source ID */
	__u32 id;
	/* Frequency for the timestamp register */
	__u32 frequency;
};

struct hl_debug_params_bmon {
	/* Two address ranges that the user can request to filter */
	__u64 start_addr0;
	__u64 addr_mask0;
	__u64 start_addr1;
	__u64 addr_mask1;
	/* Capture window configuration */
	__u32 bw_win;
	__u32 win_capture;
	/* Trace source ID */
	__u32 id;
	__u32 pad;
};

struct hl_debug_params_spmu {
	/* Event types selection */
	__u64 event_types[HL_DEBUG_MAX_AUX_VALUES];
	/* Number of event types selection */
	__u32 event_types_num;
	__u32 pad;
};
/* Opcode for ETR component */
#define HL_DEBUG_OP_ETR		0
/* Opcode for ETF component */
#define HL_DEBUG_OP_ETF		1
/* Opcode for STM component */
#define HL_DEBUG_OP_STM		2
/* Opcode for FUNNEL component */
#define HL_DEBUG_OP_FUNNEL	3
/* Opcode for BMON component */
#define HL_DEBUG_OP_BMON	4
/* Opcode for SPMU component */
#define HL_DEBUG_OP_SPMU	5
/* Opcode for timestamp (deprecated) */
#define HL_DEBUG_OP_TIMESTAMP	6
/* Opcode for setting the device into or out of debug mode. The enable
 * variable should be 1 for enabling debug mode and 0 for disabling it
 */
#define HL_DEBUG_OP_SET_MODE	7

struct hl_debug_args {
	/*
	 * Pointer to user input structure.
	 * This field is relevant to specific opcodes.
	 */
	__u64 input_ptr;
	/* Pointer to user output structure */
	__u64 output_ptr;
	/* Size of user input structure */
	__u32 input_size;
	/* Size of user output structure */
	__u32 output_size;
	/* HL_DEBUG_OP_* */
	__u32 op;
	/*
	 * Register index in the component, taken from the debug_regs_index
	 * enum in the various ASIC header files
	 */
	__u32 reg_idx;
	/* Enable/disable */
	__u32 enable;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
};
/*
 * Various information operations such as:
 * - H/W IP information
 * - Current dram usage
 *
 * The user calls this IOCTL with an opcode that describes the required
 * information. The user should supply a pointer to a user-allocated memory
 * chunk, which will be filled by the driver with the requested information.
 *
 * The user supplies the maximum size to copy into the user's memory, in order
 * to prevent data corruption in case of differences between the definitions
 * of structures in kernel and userspace, e.g. in case of an old userspace and
 * a new kernel driver
 */
#define HL_IOCTL_INFO	\
		_IOWR('H', 0x01, struct hl_info_args)
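/*
 * Usage sketch (illustrative only): reading the device status, which doesn't
 * require an open context. "fd" is assumed to be an open device file
 * descriptor; <sys/ioctl.h> and <stdint.h> are assumed. Error handling is
 * omitted.
 *
 *	struct hl_info_device_status status = {0};
 *	struct hl_info_args args = {
 *		.return_pointer = (__u64) (uintptr_t) &status,
 *		.return_size = sizeof(status),
 *		.op = HL_INFO_DEVICE_STATUS,
 *	};
 *
 *	if (!ioctl(fd, HL_IOCTL_INFO, &args) &&
 *	    status.status == HL_DEVICE_STATUS_OPERATIONAL)
 *		; // device is operational
 */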
/*
 * Command Buffer
 * - Request a Command Buffer
 * - Destroy a Command Buffer
 *
 * The command buffers are memory blocks that reside in DMA-able address
 * space and are physically contiguous so they can be accessed by the device
 * directly. They are allocated using the coherent DMA API.
 *
 * When creating a new CB, the IOCTL returns a handle of it, and the user-space
 * process needs to use that handle to mmap the buffer so it can access it.
 *
 */
#define HL_IOCTL_CB		\
		_IOWR('H', 0x02, union hl_cb_args)
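/*
 * Usage sketch (illustrative only): creating a 4KB command buffer and mapping
 * it into the process address space. The mmap() offset convention shown here
 * (passing the returned handle as the offset) is an assumption of this
 * sketch, not a statement of the driver's exact contract. <sys/mman.h> and
 * <sys/ioctl.h> are assumed; error handling is omitted.
 *
 *	union hl_cb_args cb = {0};
 *
 *	cb.in.op = HL_CB_OP_CREATE;
 *	cb.in.cb_size = 4096;
 *
 *	if (!ioctl(fd, HL_IOCTL_CB, &cb)) {
 *		void *va = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, cb.out.cb_handle);
 *		// ... write packets, submit via HL_IOCTL_CS, then munmap()
 *		//     and destroy with HL_CB_OP_DESTROY ...
 *	}
 */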
/*
 * Command Submission
 *
 * To submit work to the device, the user needs to call this IOCTL with a set
 * of JOBS. That set of JOBS constitutes a CS object.
 * Each JOB will be enqueued on a specific queue, according to the user's input.
 * There can be more than one JOB per queue.
 *
 * The CS IOCTL will receive three sets of JOBS. One set is for the "restore"
 * phase, a second set is for the "execution" phase and a third set is for the
 * "store" phase. The JOBS of the "restore" phase are enqueued only after a
 * context-switch (or if it is the first CS for this context). The user can
 * also order the driver to run the "restore" phase explicitly.
 *
 * There are two types of queues - external and internal. External queues
 * are DMA queues which transfer data from/to the Host. All other queues are
 * internal. The driver will get completion notifications from the device only
 * on JOBS which are enqueued in the external queues.
 *
 * For jobs on external queues, the user needs to create command buffers
 * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
 * internal queues, the user needs to prepare a "command buffer" with packets
 * on either the SRAM or DRAM, and give the device address of that buffer to
 * the CS ioctl.
 *
 * This IOCTL is asynchronous in regard to the actual execution of the CS. This
 * means it returns immediately after ALL the JOBS were enqueued on their
 * relevant queues. Therefore, the user mustn't assume the CS has been completed
 * or has even started to execute.
 *
 * Upon successful enqueue, the IOCTL returns a sequence number which the user
 * can use with the "Wait for CS" IOCTL to check whether the handle's CS
 * external JOBS have been completed. Note that if the CS has internal JOBS
 * which can execute AFTER the external JOBS have finished, the driver might
 * report that the CS has finished executing BEFORE the internal JOBS have
 * actually finished executing.
 *
 * Even though the sequence number increments per CS, the user can NOT
 * automatically assume that if CS with sequence number N finished, then CS
 * with sequence number N-1 also finished. The user can make this assumption if
 * and only if CS N and CS N-1 are exactly the same (same CBs for the same
 * queues).
 */
#define HL_IOCTL_CS		\
		_IOWR('H', 0x03, union hl_cs_args)
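/*
 * Usage sketch (illustrative only): submitting a single JOB on an external
 * queue with no restore-phase chunks. "fd" and "cb_handle" (returned by
 * HL_CB_OP_CREATE and already filled with packets) are assumptions of this
 * sketch, and the 256-byte packet size is arbitrary. <sys/ioctl.h> and
 * <stdint.h> are assumed; error handling is omitted.
 *
 *	struct hl_cs_chunk chunk = {
 *		.cb_handle = cb_handle,
 *		.queue_index = GOYA_QUEUE_ID_DMA_0,
 *		.cb_size = 256,			// bytes of valid packets
 *	};
 *	union hl_cs_args cs = {0};
 *	__u64 seq;
 *
 *	cs.in.chunks_execute = (__u64) (uintptr_t) &chunk;
 *	cs.in.num_chunks_execute = 1;
 *
 *	if (!ioctl(fd, HL_IOCTL_CS, &cs) &&
 *	    cs.out.status == HL_CS_STATUS_SUCCESS)
 *		seq = cs.out.seq;		// pass to HL_IOCTL_WAIT_CS
 */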
/*
 * Wait for Command Submission
 *
 * The user can call this IOCTL with a handle it received from the CS IOCTL
 * to wait until the handle's CS has finished executing. The user will wait
 * inside the kernel until the CS has finished or until the user-requested
 * timeout has expired.
 *
 * The return value of the IOCTL is a standard Linux error code. The possible
 * values are:
 *
 * EINTR     - Kernel waiting has been interrupted, e.g. due to an OS signal
 *             that the user process received
 * ETIMEDOUT - The CS has caused a timeout on the device
 * EIO       - The CS was aborted (usually because the device was reset)
 * ENODEV    - The device wants to do hard-reset (so the user needs to close
 *             the FD)
 *
 * The driver also returns a custom define inside the IOCTL which can be:
 *
 * HL_WAIT_CS_STATUS_COMPLETED   - The CS has been completed successfully (0)
 * HL_WAIT_CS_STATUS_BUSY        - The CS is still executing (0)
 * HL_WAIT_CS_STATUS_TIMEDOUT    - The CS has caused a timeout on the device
 *                                 (ETIMEDOUT)
 * HL_WAIT_CS_STATUS_ABORTED     - The CS was aborted, usually because the
 *                                 device was reset (EIO)
 * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
 *
 */
#define HL_IOCTL_WAIT_CS	\
		_IOWR('H', 0x04, union hl_wait_cs_args)
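/*
 * Usage sketch (illustrative only): waiting up to one second for the CS whose
 * sequence number was returned by HL_IOCTL_CS. "fd" and "seq" are assumptions
 * of this sketch; <errno.h> and <sys/ioctl.h> are assumed. Error handling is
 * simplified.
 *
 *	union hl_wait_cs_args wait = {0};
 *	int rc;
 *
 *	wait.in.seq = seq;
 *	wait.in.timeout_us = 1000000;
 *
 *	rc = ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
 *	if (!rc && wait.out.status == HL_WAIT_CS_STATUS_COMPLETED)
 *		; // the CS has completed
 *	else if (rc && errno == ETIMEDOUT)
 *		; // the CS caused a timeout on the device
 */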
/*
 * Memory
 * - Map host memory to device MMU
 * - Unmap host memory from device MMU
 *
 * This IOCTL allows the user to map host memory to the device MMU
 *
 * For host memory, the IOCTL doesn't allocate memory. The user is supposed
 * to allocate the memory in user-space (malloc/new). The driver pins the
 * physical pages (up to the allowed limit by the OS), assigns a virtual
 * address in the device VA space and initializes the device MMU.
 *
 * There is an option for the user to specify the requested virtual address.
 *
 */
#define HL_IOCTL_MEMORY		\
		_IOWR('H', 0x05, union hl_mem_args)
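/*
 * Usage sketch (illustrative only): mapping a page-aligned host allocation
 * into the device MMU and unmapping it afterwards. The 4KB size and alignment
 * are assumptions of this sketch; <stdlib.h>, <string.h>, <stdint.h> and
 * <sys/ioctl.h> are assumed. Error handling is omitted.
 *
 *	void *host = aligned_alloc(4096, 4096);
 *	union hl_mem_args mem = {0};
 *
 *	mem.in.op = HL_MEM_OP_MAP;
 *	mem.in.flags = HL_MEM_USERPTR;
 *	mem.in.map_host.host_virt_addr = (__u64) (uintptr_t) host;
 *	mem.in.map_host.mem_size = 4096;
 *	mem.in.map_host.hint_addr = 0;	// let the driver choose the device VA
 *
 *	if (!ioctl(fd, HL_IOCTL_MEMORY, &mem)) {
 *		__u64 device_va = mem.out.device_virt_addr;
 *
 *		// ... reference device_va from command buffers ...
 *
 *		memset(&mem, 0, sizeof(mem));
 *		mem.in.op = HL_MEM_OP_UNMAP;
 *		mem.in.unmap.device_virt_addr = device_va;
 *		ioctl(fd, HL_IOCTL_MEMORY, &mem);
 *	}
 */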
/*
 * Debug
 * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
 *
 * This IOCTL allows the user to get debug traces from the chip.
 *
 * Before the user can send configuration requests of the various
 * debug/profile engines, it needs to set the device into debug mode.
 * This is because the debug/profile infrastructure is a shared component in
 * the device and we can't allow multiple users to access it at the same time.
 *
 * Once a user sets the device into debug mode, the driver won't allow other
 * users to "work" with the device, i.e. open a FD. If multiple users have the
 * device open, the driver won't allow any of them to debug the device.
 *
 * For each configuration request, the user needs to provide the register index
 * and essential data such as buffer address and size.
 *
 * Once the user has finished using the debug/profile engines, it should set
 * the device back into non-debug mode, i.e. disable debug mode.
 *
 * The driver can decide to "kick out" a user that abuses this interface.
 *
 */
#define HL_IOCTL_DEBUG		\
		_IOWR('H', 0x06, struct hl_debug_args)
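/*
 * Usage sketch (illustrative only): putting the device into debug mode before
 * configuring any debug/profile engine, and taking it out again when done.
 * "fd" is assumed to be an open device file descriptor; <sys/ioctl.h> is
 * assumed and error handling is omitted.
 *
 *	struct hl_debug_args dbg = {0};
 *
 *	dbg.op = HL_DEBUG_OP_SET_MODE;
 *	dbg.enable = 1;
 *	if (!ioctl(fd, HL_IOCTL_DEBUG, &dbg)) {
 *		// ... configure ETR/ETF/STM/BMON/SPMU through further
 *		//     HL_IOCTL_DEBUG calls ...
 *
 *		dbg.enable = 0;
 *		ioctl(fd, HL_IOCTL_DEBUG, &dbg);	// leave debug mode
 *	}
 */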
#define HL_COMMAND_START	0x01
#define HL_COMMAND_END		0x07

#endif /* HABANALABS_H_ */