/* vki-xen-x86.h */

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2012-2017 Citrix

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* Contributed by Andrew Cooper <andrew.cooper3@citrix.com>
   and Ian Campbell <ian.campbell@citrix.com> */
#ifndef __VKI_XEN_X86_H
#define __VKI_XEN_X86_H

#if defined(__i386__)
#define ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type)                      \
    typedef struct { type *p; }                                          \
        __vki_xen_guest_handle_ ## name;                                 \
    typedef struct { union { type *p; vki_xen_uint64_aligned_t q; }; }   \
        __vki_xen_guest_handle_64_ ## name
#define vki_xen_uint64_aligned_t vki_uint64_t __attribute__((aligned(8)))
#define __VKI_XEN_GUEST_HANDLE_64(name) __vki_xen_guest_handle_64_ ## name
#define VKI_XEN_GUEST_HANDLE_64(name) __VKI_XEN_GUEST_HANDLE_64(name)
#else
#define ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type) \
    typedef struct { type *p; } __vki_xen_guest_handle_ ## name
#define vki_xen_uint64_aligned_t vki_uint64_t
#define __DEFINE_VKI_XEN_GUEST_HANDLE(name, type)   \
    ___DEFINE_VKI_XEN_GUEST_HANDLE(name, type);     \
    ___DEFINE_VKI_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_VKI_XEN_GUEST_HANDLE(name) __DEFINE_VKI_XEN_GUEST_HANDLE(name, name)
#define VKI_XEN_GUEST_HANDLE_64(name) VKI_XEN_GUEST_HANDLE(name)
#endif

#define __VKI_XEN_GUEST_HANDLE(name) __vki_xen_guest_handle_ ## name
#define VKI_XEN_GUEST_HANDLE(name) __VKI_XEN_GUEST_HANDLE(name)
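
/*
 * A sketch of what the macros above produce on the non-i386 branch
 * (illustrative only, derived mechanically from the definitions): the
 * invocation used later in this file,
 *
 *     DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_vcpu_guest_context_t);
 *
 * expands to
 *
 *     typedef struct { vki_xen_vcpu_guest_context_t *p; }
 *         __vki_xen_guest_handle_vki_xen_vcpu_guest_context_t;
 *     typedef struct { const vki_xen_vcpu_guest_context_t *p; }
 *         __vki_xen_guest_handle_const_vki_xen_vcpu_guest_context_t;
 *
 * and VKI_XEN_GUEST_HANDLE(vki_xen_vcpu_guest_context_t) then names the
 * first (non-const) wrapper struct.
 */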

typedef unsigned long vki_xen_pfn_t;
typedef unsigned long vki_xen_ulong_t;

#if defined(__i386__)
struct vki_xen_cpu_user_regs {
    vki_uint32_t ebx;
    vki_uint32_t ecx;
    vki_uint32_t edx;
    vki_uint32_t esi;
    vki_uint32_t edi;
    vki_uint32_t ebp;
    vki_uint32_t eax;
    vki_uint16_t error_code;    /* private */
    vki_uint16_t entry_vector;  /* private */
    vki_uint32_t eip;
    vki_uint16_t cs;
    vki_uint8_t  saved_upcall_mask;
    vki_uint8_t  _pad0;
    vki_uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
    vki_uint32_t esp;
    vki_uint16_t ss, _pad1;
    vki_uint16_t es, _pad2;
    vki_uint16_t ds, _pad3;
    vki_uint16_t fs, _pad4;
    vki_uint16_t gs, _pad5;
};
#else
struct vki_xen_cpu_user_regs {
    vki_uint64_t r15;
    vki_uint64_t r14;
    vki_uint64_t r13;
    vki_uint64_t r12;
    vki_uint64_t rbp;
    vki_uint64_t rbx;
    vki_uint64_t r11;
    vki_uint64_t r10;
    vki_uint64_t r9;
    vki_uint64_t r8;
    vki_uint64_t rax;
    vki_uint64_t rcx;
    vki_uint64_t rdx;
    vki_uint64_t rsi;
    vki_uint64_t rdi;
    vki_uint32_t error_code;    /* private */
    vki_uint32_t entry_vector;  /* private */
    vki_uint64_t rip;
    vki_uint16_t cs, _pad0[1];
    vki_uint8_t  saved_upcall_mask;
    vki_uint8_t  _pad1[3];
    vki_uint64_t rflags;        /* rflags.IF == !saved_upcall_mask */
    vki_uint64_t rsp;
    vki_uint16_t ss, _pad2[3];
    vki_uint16_t es, _pad3[3];
    vki_uint16_t ds, _pad4[3];
    vki_uint16_t fs, _pad5[3];  /* Non-zero => takes precedence over fs_base.     */
    vki_uint16_t gs, _pad6[3];  /* Non-zero => takes precedence over gs_base_usr. */
};
#endif
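
/* Both register layouts above mirror Xen's struct cpu_user_regs from the
   public arch-x86 headers (xen-x86_32.h / xen-x86_64.h). */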

struct vki_xen_trap_info {
    vki_uint8_t   vector;   /* exception vector                              */
    vki_uint8_t   flags;    /* 0-3: privilege level; 4: clear event enable?  */
    vki_uint16_t  cs;       /* code selector                                 */
    unsigned long address;  /* code offset                                   */
};

struct vki_xen_vcpu_guest_context {
    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
    struct { char x[512]; } fpu_ctxt;        /* User-level FPU registers     */
    unsigned long flags;                     /* VGCF_* flags                 */
    struct vki_xen_cpu_user_regs user_regs;  /* User-level CPU registers     */
    struct vki_xen_trap_info trap_ctxt[256]; /* Virtual IDT                  */
    unsigned long ldt_base, ldt_ents;        /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents;  /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp;      /* Virtual TSS (only SS1/SP1)   */
    /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
    unsigned long ctrlreg[8];                /* CR0-CR7 (control registers)  */
    unsigned long debugreg[8];               /* DB0-DB7 (debug registers)    */
#ifdef __i386__
    unsigned long event_callback_cs;         /* CS:EIP of event callback     */
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_cs;      /* CS:EIP of failsafe callback  */
    unsigned long failsafe_callback_eip;
#else
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_eip;
    unsigned long syscall_callback_eip;
#endif
    unsigned long vm_assist;                 /* VMASST_TYPE_* bitmap         */
#ifdef __x86_64__
    /* Segment base addresses. */
    vki_uint64_t fs_base;
    vki_uint64_t gs_base_kernel;
    vki_uint64_t gs_base_user;
#endif
};
typedef struct vki_xen_vcpu_guest_context vki_xen_vcpu_guest_context_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_vcpu_guest_context_t);
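
/*
 * Minimal caller-side sketch (assumed usage, not part of this header):
 * Xen's get/set-vcpucontext domctls pass this structure by 64-bit guest
 * handle, roughly
 *
 *     vki_xen_vcpu_guest_context_t ctxt;
 *     VKI_XEN_GUEST_HANDLE_64(vki_xen_vcpu_guest_context_t) h = { .p = &ctxt };
 *
 * The actual domctl layouts live in vki-xen-domctl.h.
 */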

/* HVM_SAVE types and declarations for getcontext_partial */
#define VKI_DECLARE_HVM_SAVE_TYPE(_x, _code, _type)                          \
    struct __VKI_HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1]; }

#define VKI_HVM_SAVE_TYPE(_x)   typeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->t)
#define VKI_HVM_SAVE_LENGTH(_x) (sizeof (VKI_HVM_SAVE_TYPE(_x)))
#define VKI_HVM_SAVE_CODE(_x)   (sizeof (((struct __VKI_HVM_SAVE_TYPE_##_x *)(0))->c))
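
/*
 * How these evaluate for the CPU record declared below
 * (VKI_DECLARE_HVM_SAVE_TYPE(CPU, 2, struct vki_hvm_hw_cpu)):
 *
 *     VKI_HVM_SAVE_TYPE(CPU)    -> struct vki_hvm_hw_cpu
 *     VKI_HVM_SAVE_LENGTH(CPU)  -> sizeof(struct vki_hvm_hw_cpu)
 *     VKI_HVM_SAVE_CODE(CPU)    -> 2   (the size of the char c[2] member)
 *
 * A getcontext_partial request uses the save code to select which
 * per-vcpu record to fetch, into a buffer of at least the save length.
 */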

struct vki_hvm_hw_cpu {
    vki_uint8_t  fpu_regs[512];

    vki_uint64_t rax;
    vki_uint64_t rbx;
    vki_uint64_t rcx;
    vki_uint64_t rdx;
    vki_uint64_t rbp;
    vki_uint64_t rsi;
    vki_uint64_t rdi;
    vki_uint64_t rsp;
    vki_uint64_t r8;
    vki_uint64_t r9;
    vki_uint64_t r10;
    vki_uint64_t r11;
    vki_uint64_t r12;
    vki_uint64_t r13;
    vki_uint64_t r14;
    vki_uint64_t r15;

    vki_uint64_t rip;
    vki_uint64_t rflags;

    vki_uint64_t cr0;
    vki_uint64_t cr2;
    vki_uint64_t cr3;
    vki_uint64_t cr4;

    vki_uint64_t dr0;
    vki_uint64_t dr1;
    vki_uint64_t dr2;
    vki_uint64_t dr3;
    vki_uint64_t dr6;
    vki_uint64_t dr7;

    vki_uint32_t cs_sel;
    vki_uint32_t ds_sel;
    vki_uint32_t es_sel;
    vki_uint32_t fs_sel;
    vki_uint32_t gs_sel;
    vki_uint32_t ss_sel;
    vki_uint32_t tr_sel;
    vki_uint32_t ldtr_sel;

    vki_uint32_t cs_limit;
    vki_uint32_t ds_limit;
    vki_uint32_t es_limit;
    vki_uint32_t fs_limit;
    vki_uint32_t gs_limit;
    vki_uint32_t ss_limit;
    vki_uint32_t tr_limit;
    vki_uint32_t ldtr_limit;
    vki_uint32_t idtr_limit;
    vki_uint32_t gdtr_limit;

    vki_uint64_t cs_base;
    vki_uint64_t ds_base;
    vki_uint64_t es_base;
    vki_uint64_t fs_base;
    vki_uint64_t gs_base;
    vki_uint64_t ss_base;
    vki_uint64_t tr_base;
    vki_uint64_t ldtr_base;
    vki_uint64_t idtr_base;
    vki_uint64_t gdtr_base;

    vki_uint32_t cs_arbytes;
    vki_uint32_t ds_arbytes;
    vki_uint32_t es_arbytes;
    vki_uint32_t fs_arbytes;
    vki_uint32_t gs_arbytes;
    vki_uint32_t ss_arbytes;
    vki_uint32_t tr_arbytes;
    vki_uint32_t ldtr_arbytes;

    vki_uint64_t sysenter_cs;
    vki_uint64_t sysenter_esp;
    vki_uint64_t sysenter_eip;

    /* msr for em64t */
    vki_uint64_t shadow_gs;

    /* msr content saved/restored. */
    vki_uint64_t msr_flags;
    vki_uint64_t msr_lstar;
    vki_uint64_t msr_star;
    vki_uint64_t msr_cstar;
    vki_uint64_t msr_syscall_mask;
    vki_uint64_t msr_efer;
    vki_uint64_t msr_tsc_aux;

    /* guest's idea of what rdtsc() would return */
    vki_uint64_t tsc;

    /* pending event, if any */
    union {
        vki_uint32_t pending_event;
        struct {
            vki_uint8_t  pending_vector:8;
            vki_uint8_t  pending_type:3;
            vki_uint8_t  pending_error_valid:1;
            vki_uint32_t pending_reserved:19;
            vki_uint8_t  pending_valid:1;
        };
    };
    /* error code for pending event */
    vki_uint32_t error_code;
};
VKI_DECLARE_HVM_SAVE_TYPE(CPU, 2, struct vki_hvm_hw_cpu);

struct vki_hvm_hw_mtrr {
#define VKI_MTRR_VCNT      8
#define VKI_NUM_FIXED_MSR 11
    vki_uint64_t msr_pat_cr;
    /* mtrr physbase & physmask msr pair */
    vki_uint64_t msr_mtrr_var[VKI_MTRR_VCNT * 2];
    vki_uint64_t msr_mtrr_fixed[VKI_NUM_FIXED_MSR];
    vki_uint64_t msr_mtrr_cap;
    vki_uint64_t msr_mtrr_def_type;
};
VKI_DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct vki_hvm_hw_mtrr);

#endif // __VKI_XEN_X86_H

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/