/* Operations with very long integers.  -*- C++ -*-
   Copyright (C) 2012-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef WIDE_INT_H
#define WIDE_INT_H

/* wide-int.[cc|h] implements a class that efficiently performs
   mathematical operations on finite precision integers.  wide_ints
   are designed to be transient - they are not for long term storage
   of values.  There is tight integration between wide_ints and the
   other longer storage GCC representations (rtl and tree).

   The actual precision of a wide_int depends on the flavor.  There
   are three predefined flavors:

     1) wide_int (the default).  This flavor does the math in the
     precision of its input arguments.  It is assumed (and checked)
     that the precisions of the operands and results are consistent.
     This is the most efficient flavor.  It is not possible to examine
     bits above the precision that has been specified.  Because of
     this, the default flavor has semantics that are simple to
     understand and in general model the underlying hardware that the
     compiler is targeted for.

     This flavor must be used at the RTL level of gcc because there
     is, in general, not enough information in the RTL representation
     to extend a value beyond the precision specified in the mode.

     This flavor should also be used at the TREE and GIMPLE levels of
     the compiler except for the circumstances described in the
     descriptions of the other two flavors.

     The default wide_int representation does not contain any
     information inherent about signedness of the represented value,
     so it can be used to represent both signed and unsigned numbers.
     For operations where the results depend on signedness (full width
     multiply, division, shifts, comparisons, and operations that need
     overflow detected), the signedness must be specified separately.

     2) offset_int.  This is a fixed-precision integer that can hold
     any address offset, measured in either bits or bytes, with at
     least one extra sign bit.  At the moment the maximum address
     size GCC supports is 64 bits.  With 8-bit bytes and an extra
     sign bit, offset_int therefore needs to have at least 68 bits
     of precision.  We round this up to 128 bits for efficiency.
     Values of type T are converted to this precision by sign- or
     zero-extending them based on the signedness of T.

     The extra sign bit means that offset_int is effectively a signed
     128-bit integer, i.e. it behaves like int128_t.

     Since the values are logically signed, there is no need to
     distinguish between signed and unsigned operations.  Sign-sensitive
     comparison operators <, <=, > and >= are therefore supported.
     Shift operators << and >> are also supported, with >> being
     an _arithmetic_ right shift.

     [ Note that, even though offset_int is effectively int128_t,
       it can still be useful to use unsigned comparisons like
       wi::leu_p (a, b) as a more efficient short-hand for
       "a >= 0 && a <= b". ]

     3) widest_int.  This representation is an approximation of
     infinite precision math.  However, it is not really infinite
     precision math as in the GMP library.  It is really finite
     precision math where the precision is 4 times the size of the
     largest integer that the target port can represent.

     Like offset_int, widest_int is wider than all the values that
     it needs to represent, so the integers are logically signed.

     Sign-sensitive comparison operators <, <=, > and >= are supported,
     as are << and >>.

   There are several places in GCC where this should/must be used:

     * Code that does induction variable optimizations.  This code
       works with induction variables of many different types at the
       same time.  Because of this, it ends up doing many different
       calculations where the operands are not compatible types.  The
       widest_int makes this easy, because it provides a field where
       nothing is lost when converting from any variable.

     * There are a small number of passes that currently use the
       widest_int that should use the default.  These should be
       changed.

   There are surprising features of offset_int and widest_int
   that the users should be careful about:

     1) Shifts and rotations are just weird.  You have to specify the
     precision in which the shift or rotate is to happen.  The bits
     above this precision are zeroed.  While this is what you
     want, it is clearly non-obvious.

     2) Larger precision math sometimes does not produce the same
     answer as would be expected for doing the math at the proper
     precision.  In particular, a multiply followed by a divide will
     produce a different answer if the first product is larger than
     what can be represented in the input precision.

   The offset_int and the widest_int flavors are more expensive
   than the default wide_int, so in addition to the caveats with these
   two, the default is the preferred representation.

   All three flavors of wide_int are represented as a vector of
   HOST_WIDE_INTs.  The default and widest_int vectors contain enough elements
   to hold a value of MAX_BITSIZE_MODE_ANY_INT bits.  offset_int contains only
   enough elements to hold ADDR_MAX_PRECISION bits.  The values are stored
   in the vector with the least significant HOST_BITS_PER_WIDE_INT bits
   in element 0.

   The default wide_int contains three fields: the vector (VAL),
   the precision and a length (LEN).  The length is the number of HWIs
   needed to represent the value.  widest_int and offset_int have a
   constant precision that cannot be changed, so they only store the
   VAL and LEN fields.

   Since most integers used in a compiler are small values, it is
   generally profitable to use a representation of the value that is
   as small as possible.  LEN is used to indicate the number of
   elements of the vector that are in use.  The numbers are stored as
   sign extended numbers as a means of compression.  Leading
   HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
   as long as they can be reconstructed from the top bit that is being
   represented.

   The precision and length of a wide_int are always greater than 0.

   Any bits in a wide_int above the precision are sign-extended from the
   most significant bit.  For example, a 4-bit value 0x8 is represented as
   VAL = { 0xf...fff8 }.  However, as an optimization, we allow other integer
   constants to be represented with undefined bits above the precision.
   This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN,
   so that the INTEGER_CST representation can be used both in TYPE_PRECISION
   and in wider precisions.

   There are constructors to create the various forms of wide_int from
   trees, rtl and constants.  For trees the options are:

     tree t = ...;
     wi::to_wide (t)     // Treat T as a wide_int
     wi::to_offset (t)   // Treat T as an offset_int
     wi::to_widest (t)   // Treat T as a widest_int

   All three are light-weight accessors that should have no overhead
   in release builds.  If it is useful for readability reasons to
   store the result in a temporary variable, the preferred method is:

     wi::tree_to_wide_ref twide = wi::to_wide (t);
     wi::tree_to_offset_ref toffset = wi::to_offset (t);
     wi::tree_to_widest_ref twidest = wi::to_widest (t);

   To make an rtx into a wide_int, you have to pair it with a mode.
   The canonical way to do this is with rtx_mode_t as in:

     rtx r = ...
     wide_int x = rtx_mode_t (r, mode);

   Similarly, a wide_int can only be constructed from a host value if
   the target precision is given explicitly, such as in:

     wide_int x = wi::shwi (c, prec);  // sign-extend C if necessary
     wide_int y = wi::uhwi (c, prec);  // zero-extend C if necessary

   However, offset_int and widest_int have an inherent precision and so
   can be initialized directly from a host value:

     offset_int x = (int) c;           // sign-extend C
     widest_int x = (unsigned int) c;  // zero-extend C

   It is also possible to do arithmetic directly on rtx_mode_ts and
   constants.  For example:

     wi::add (r1, r2);     // add equal-sized rtx_mode_ts r1 and r2
     wi::add (r1, 1);      // add 1 to rtx_mode_t r1
     wi::lshift (1, 100);  // 1 << 100 as a widest_int

   Many binary operations place restrictions on the combinations of inputs,
   using the following rules:

   - {rtx, wide_int} op {rtx, wide_int} -> wide_int
       The inputs must be the same precision.  The result is a wide_int
       of the same precision.

   - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
     (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int
       The HOST_WIDE_INT is extended or truncated to the precision of
       the other input.  The result is a wide_int of the same precision
       as that input.

   - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int
       The inputs are extended to widest_int precision and produce a
       widest_int result.

   - offset_int op offset_int -> offset_int
     offset_int op (un)signed HOST_WIDE_INT -> offset_int
     (un)signed HOST_WIDE_INT op offset_int -> offset_int

   - widest_int op widest_int -> widest_int
     widest_int op (un)signed HOST_WIDE_INT -> widest_int
     (un)signed HOST_WIDE_INT op widest_int -> widest_int

   Other combinations like:

   - widest_int op offset_int and
   - wide_int op offset_int

   are not allowed.  The inputs should instead be extended or truncated
   so that they match.

   The inputs to comparison functions like wi::eq_p and wi::lts_p
   follow the same compatibility rules, although their return types
   are different.  Unary functions on X produce the same result type as
   the binary operation X + X.  Shift functions X op Y also produce
   the same result type as X + X; the precision of the shift amount Y
   can be arbitrarily different from X.  */

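/* An illustrative sketch of the flavor rules above (an editorial
   example, not part of the original documentation):

     wide_int a = wi::shwi (-1, 16);       // 16-bit value, all ones
     wide_int b = wi::uhwi (3, 16);        // 16-bit value 3
     wide_int sum = wi::add (a, b);        // 16-bit result, precisions checked

     offset_int off = (int) -4;            // sign-extended to 128 bits
     bool neg = wi::neg_p (off);           // signedness is implicit

     widest_int big = wi::lshift (1, 100); // cannot overflow a host int  */
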
/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
   early examination of the target's mode file.  The WIDE_INT_MAX_ELTS
   can accommodate at least 1 more bit so that unsigned numbers of that
   mode can be represented as a signed value.  Note that it is still
   possible to create fixed_wide_ints that have precisions greater than
   MAX_BITSIZE_MODE_ANY_INT.  This can be useful when representing a
   double-width multiplication result, for example.  */
#define WIDE_INT_MAX_ELTS \
  ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)

#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)

/* This is the max size of any pointer on any machine.  It does not
   seem to be as easy to sniff this out of the machine description as
   it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
   multiple address sizes and may have different address sizes for
   different address spaces.  However, currently the largest pointer
   on any platform is 64 bits.  When that changes, then it is likely
   that a target hook should be defined so that targets can make this
   value larger for those targets.  */
#define ADDR_MAX_BITSIZE 64

/* This is the internal precision used when doing any address
   arithmetic.  The '4' is really 3 + 1.  Three of the bits are for
   the number of extra bits needed to do bit addresses and the other bit
   is to allow everything to be signed without losing any precision.
   Then everything is rounded up to the next HWI for efficiency.  */
#define ADDR_MAX_PRECISION \
  ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) \
   & ~(HOST_BITS_PER_WIDE_INT - 1))

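/* For example (a worked instance, not in the original comment): with
   ADDR_MAX_BITSIZE == 64 and HOST_BITS_PER_WIDE_INT == 64, this is
   (64 + 4 + 63) & ~63 == 128, i.e. two HWIs.  */
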
/* The number of HWIs needed to store an offset_int.  */
#define OFFSET_INT_ELTS (ADDR_MAX_PRECISION / HOST_BITS_PER_WIDE_INT)

/* The type of result produced by a binary operation on types T1 and T2.
   Defined purely for brevity.  */
#define WI_BINARY_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::result_type

/* Likewise for binary operators, which excludes the case in which neither
   T1 nor T2 is a wide-int-based type.  */
#define WI_BINARY_OPERATOR_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::operator_result

/* The type of result produced by T1 << T2.  Leads to substitution failure
   if the operation isn't supported.  Defined purely for brevity.  */
#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_shift_result_type

/* The type of result produced by a sign-agnostic binary predicate on
   types T1 and T2.  This is bool if wide-int operations make sense for
   T1 and T2 and leads to substitution failure otherwise.  */
#define WI_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::predicate_result

/* The type of result produced by a signed binary predicate on types T1 and T2.
   This is bool if signed comparisons make sense for T1 and T2 and leads to
   substitution failure otherwise.  */
#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_predicate_result

/* The type of result produced by a unary operation on type T.  */
#define WI_UNARY_RESULT(T) \
  typename wi::binary_traits <T, T>::result_type

/* Define a variable RESULT to hold the result of a binary operation on
   X and Y, which have types T1 and T2 respectively.  Define VAL to
   point to the blocks of RESULT.  Once the user of the macro has
   filled in VAL, it should call RESULT.set_len to set the number
   of initialized blocks.  */
#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
  WI_BINARY_RESULT (T1, T2) RESULT = \
    wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
  HOST_WIDE_INT *VAL = RESULT.write_val ()

/* Similar for the result of a unary operation on X, which has type T.  */
#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
  WI_UNARY_RESULT (T) RESULT = \
    wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
  HOST_WIDE_INT *VAL = RESULT.write_val ()

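/* A sketch of the usage pattern these macros support (the operation
   name is hypothetical; the real implementations live in wide-int.cc):

     template <typename T1, typename T2>
     WI_BINARY_RESULT (T1, T2)
     wi::some_op (const T1 &x, const T2 &y)
     {
       WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
       val[0] = 0;          // ... fill in the blocks of VAL ...
       result.set_len (1);  // ... then record how many are valid.
       return result;
     }  */
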
template <typename T> class generic_wide_int;
template <int N> class fixed_wide_int_storage;
class wide_int_storage;

/* An N-bit integer.  Until we can use typedef templates, use this instead.  */
#define FIXED_WIDE_INT(N) \
  generic_wide_int < fixed_wide_int_storage <N> >

typedef generic_wide_int <wide_int_storage> wide_int;
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
   so as not to confuse gengtype.  */
typedef generic_wide_int < fixed_wide_int_storage <WIDE_INT_MAX_PRECISION * 2> > widest2_int;

/* wi::storage_ref can be a reference to a primitive type,
   so this is the conservatively-correct setting.  */
template <bool SE, bool HDP = true>
struct wide_int_ref_storage;

typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;

/* This can be used instead of wide_int_ref if the referenced value is
   known to have type T.  It carries across properties of T's representation,
   such as whether excess upper bits in a HWI are defined, and can therefore
   help avoid redundant work.

   The macro could be replaced with a template typedef, once we're able
   to use those.  */
#define WIDE_INT_REF_FOR(T) \
  generic_wide_int \
    <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
                           wi::int_traits <T>::host_dependent_precision> >

namespace wi
{
  /* Operations that calculate overflow do so even for
     TYPE_OVERFLOW_WRAPS types.  For example, adding 1 to +MAX_INT in
     an unsigned int is 0 and does not overflow in C/C++, but wi::add
     will set the overflow argument in case it's needed for further
     analysis.

     For operations that require overflow, these are the different
     types of overflow.  */
  enum overflow_type {
    OVF_NONE = 0,
    OVF_UNDERFLOW = -1,
    OVF_OVERFLOW = 1,
    /* There was an overflow, but we are unsure whether it was an
       overflow or an underflow.  */
    OVF_UNKNOWN = 2
  };

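  /* For example (an illustrative sketch, not from the original text):

       wi::overflow_type ovf;
       wide_int x = wi::uhwi (255, 8);                 // 8-bit 0xff
       wide_int sum = wi::add (x, 1, UNSIGNED, &ovf);  // wraps to 0
       // ovf is now wi::OVF_OVERFLOW, even though unsigned arithmetic
       // wraps rather than overflows in C/C++.  */
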
  /* Classifies an integer based on its precision.  */
  enum precision_type {
    /* The integer has both a precision and defined signedness.  This allows
       the integer to be converted to any width, since we know whether to fill
       any extra bits with zeros or signs.  */
    FLEXIBLE_PRECISION,

    /* The integer has a variable precision but no defined signedness.  */
    VAR_PRECISION,

    /* The integer has a constant precision (known at GCC compile time)
       and is signed.  */
    CONST_PRECISION
  };

  /* This class, which has no default implementation, is expected to
     provide the following members:

     static const enum precision_type precision_type;
       Classifies the type of T.

     static const unsigned int precision;
       Only defined if precision_type == CONST_PRECISION.  Specifies the
       precision of all integers of type T.

     static const bool host_dependent_precision;
       True if the precision of T depends (or can depend) on the host.

     static unsigned int get_precision (const T &x)
       Return the number of bits in X.

     static wi::storage_ref *decompose (HOST_WIDE_INT *scratch,
                                        unsigned int precision, const T &x)
       Decompose X as a PRECISION-bit integer, returning the associated
       wi::storage_ref.  SCRATCH is available as scratch space if needed.
       The routine should assert that PRECISION is acceptable.  */
  template <typename T> struct int_traits;

  /* This class provides a single type, result_type, which specifies the
     type of integer produced by a binary operation whose inputs have
     types T1 and T2.  The definition should be symmetric.  */
  template <typename T1, typename T2,
            enum precision_type P1 = int_traits <T1>::precision_type,
            enum precision_type P2 = int_traits <T2>::precision_type>
  struct binary_traits;

  /* Specify the result type for each supported combination of binary
     inputs.  Note that CONST_PRECISION and VAR_PRECISION cannot be
     mixed, in order to give stronger type checking.  When both inputs
     are CONST_PRECISION, they must have the same precision.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef widest_int result_type;
    /* Don't define operators for this combination.  */
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
                               <int_traits <T2>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
                               <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
  {
    STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
                               <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };
}

/* Public functions for querying and operating on integers.  */
namespace wi
{
  template <typename T>
  unsigned int get_precision (const T &);

  template <typename T1, typename T2>
  unsigned int get_binary_precision (const T1 &, const T2 &);

  template <typename T1, typename T2>
  void copy (T1 &, const T2 &);

#define UNARY_PREDICATE \
  template <typename T> bool
#define UNARY_FUNCTION \
  template <typename T> WI_UNARY_RESULT (T)
#define BINARY_PREDICATE \
  template <typename T1, typename T2> bool
#define BINARY_FUNCTION \
  template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
#define SHIFT_FUNCTION \
  template <typename T1, typename T2> WI_UNARY_RESULT (T1)

  UNARY_PREDICATE fits_shwi_p (const T &);
  UNARY_PREDICATE fits_uhwi_p (const T &);
  UNARY_PREDICATE neg_p (const T &, signop = SIGNED);

  template <typename T>
  HOST_WIDE_INT sign_mask (const T &);

  BINARY_PREDICATE eq_p (const T1 &, const T2 &);
  BINARY_PREDICATE ne_p (const T1 &, const T2 &);
  BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE lts_p (const T1 &, const T2 &);
  BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
  BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE les_p (const T1 &, const T2 &);
  BINARY_PREDICATE leu_p (const T1 &, const T2 &);
  BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE gts_p (const T1 &, const T2 &);
  BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
  BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE ges_p (const T1 &, const T2 &);
  BINARY_PREDICATE geu_p (const T1 &, const T2 &);

  template <typename T1, typename T2>
  int cmp (const T1 &, const T2 &, signop);

  template <typename T1, typename T2>
  int cmps (const T1 &, const T2 &);

  template <typename T1, typename T2>
  int cmpu (const T1 &, const T2 &);

  UNARY_FUNCTION bit_not (const T &);
  UNARY_FUNCTION neg (const T &);
  UNARY_FUNCTION neg (const T &, overflow_type *);
  UNARY_FUNCTION abs (const T &);
  UNARY_FUNCTION ext (const T &, unsigned int, signop);
  UNARY_FUNCTION sext (const T &, unsigned int);
  UNARY_FUNCTION zext (const T &, unsigned int);
  UNARY_FUNCTION set_bit (const T &, unsigned int);

  BINARY_FUNCTION min (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smin (const T1 &, const T2 &);
  BINARY_FUNCTION umin (const T1 &, const T2 &);
  BINARY_FUNCTION max (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smax (const T1 &, const T2 &);
  BINARY_FUNCTION umax (const T1 &, const T2 &);
  BINARY_FUNCTION bit_and (const T1 &, const T2 &);
  BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &, signop, overflow_type *);
  BINARY_FUNCTION sub (const T1 &, const T2 &);
  BINARY_FUNCTION sub (const T1 &, const T2 &, signop, overflow_type *);
  BINARY_FUNCTION mul (const T1 &, const T2 &);
  BINARY_FUNCTION mul (const T1 &, const T2 &, signop, overflow_type *);
  BINARY_FUNCTION smul (const T1 &, const T2 &, overflow_type *);
  BINARY_FUNCTION umul (const T1 &, const T2 &, overflow_type *);
  BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
  BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop,
                             overflow_type * = 0);
  BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop,
                             overflow_type * = 0);
  BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop,
                            overflow_type * = 0);
  BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
  BINARY_FUNCTION div_round (const T1 &, const T2 &, signop,
                             overflow_type * = 0);
  BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
                                WI_BINARY_RESULT (T1, T2) *);
  BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
  BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop,
                             overflow_type * = 0);
  BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop,
                             overflow_type * = 0);
  BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
  BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop,
                            overflow_type * = 0);
  BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop,
                             overflow_type * = 0);

  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop);

  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop,
                      WI_BINARY_RESULT (T1, T2) *);

  SHIFT_FUNCTION lshift (const T1 &, const T2 &);
  SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
  SHIFT_FUNCTION arshift (const T1 &, const T2 &);
  SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
  SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
  SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);

#undef SHIFT_FUNCTION
#undef BINARY_PREDICATE
#undef BINARY_FUNCTION
#undef UNARY_PREDICATE
#undef UNARY_FUNCTION

  bool only_sign_bit_p (const wide_int_ref &, unsigned int);
  bool only_sign_bit_p (const wide_int_ref &);
  int clz (const wide_int_ref &);
  int clrsb (const wide_int_ref &);
  int ctz (const wide_int_ref &);
  int exact_log2 (const wide_int_ref &);
  int floor_log2 (const wide_int_ref &);
  int ffs (const wide_int_ref &);
  int popcount (const wide_int_ref &);
  int parity (const wide_int_ref &);

  template <typename T>
  unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);

  template <typename T>
  unsigned int min_precision (const T &, signop);

  static inline void accumulate_overflow (overflow_type &, overflow_type);
}

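/* For example (an illustrative sketch, not part of the original header):

     widest_int rem;
     widest_int quot = wi::divmod_trunc (wi::to_widest (t1),
                                         wi::to_widest (t2), SIGNED, &rem);

   computes the truncated quotient and the corresponding remainder in
   one call.  */
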
namespace wi
{
  /* Contains the components of a decomposed integer for easy, direct
     access.  */
  struct storage_ref
  {
    storage_ref () {}
    storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);

    const HOST_WIDE_INT *val;
    unsigned int len;
    unsigned int precision;

    /* Provide enough trappings for this class to act as storage for
       generic_wide_int.  */
    unsigned int get_len () const;
    unsigned int get_precision () const;
    const HOST_WIDE_INT *get_val () const;
  };
}

inline wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in,
                                     unsigned int len_in,
                                     unsigned int precision_in)
  : val (val_in), len (len_in), precision (precision_in)
{
}

inline unsigned int
wi::storage_ref::get_len () const
{
  return len;
}

inline unsigned int
wi::storage_ref::get_precision () const
{
  return precision;
}

inline const HOST_WIDE_INT *
wi::storage_ref::get_val () const
{
  return val;
}

/* This class defines an integer type using the storage provided by the
   template argument.  The storage class must provide the following
   functions:

   unsigned int get_precision () const
     Return the number of bits in the integer.

   HOST_WIDE_INT *get_val () const
     Return a pointer to the array of blocks that encodes the integer.

   unsigned int get_len () const
     Return the number of blocks in get_val ().  If this is smaller
     than the number of blocks implied by get_precision (), the
     remaining blocks are sign extensions of block get_len () - 1.

   Although not required by generic_wide_int itself, writable storage
   classes can also provide the following functions:

   HOST_WIDE_INT *write_val ()
     Get a modifiable version of get_val ()

   unsigned int set_len (unsigned int len)
     Set the value returned by get_len () to LEN.  */
template <typename storage>
class GTY(()) generic_wide_int : public storage
{
public:
  generic_wide_int ();

  template <typename T>
  generic_wide_int (const T &);

  template <typename T>
  generic_wide_int (const T &, unsigned int);

  /* Conversions.  */
  HOST_WIDE_INT to_shwi (unsigned int) const;
  HOST_WIDE_INT to_shwi () const;
  unsigned HOST_WIDE_INT to_uhwi (unsigned int) const;
  unsigned HOST_WIDE_INT to_uhwi () const;
  HOST_WIDE_INT to_short_addr () const;

  /* Public accessors for the interior of a wide int.  */
  HOST_WIDE_INT sign_mask () const;
  HOST_WIDE_INT elt (unsigned int) const;
  unsigned HOST_WIDE_INT ulow () const;
  unsigned HOST_WIDE_INT uhigh () const;
  HOST_WIDE_INT slow () const;
  HOST_WIDE_INT shigh () const;

  template <typename T>
  generic_wide_int &operator = (const T &);

#define ASSIGNMENT_OPERATOR(OP, F) \
  template <typename T> \
  generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }

/* Restrict these to cases where the shift operator is defined.  */
#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
  template <typename T> \
  generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }

#define INCDEC_OPERATOR(OP, DELTA) \
  generic_wide_int &OP () { *this += DELTA; return *this; }

  ASSIGNMENT_OPERATOR (operator &=, bit_and)
  ASSIGNMENT_OPERATOR (operator |=, bit_or)
  ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
  ASSIGNMENT_OPERATOR (operator +=, add)
  ASSIGNMENT_OPERATOR (operator -=, sub)
  ASSIGNMENT_OPERATOR (operator *=, mul)
  ASSIGNMENT_OPERATOR (operator <<=, lshift)
  SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
  INCDEC_OPERATOR (operator ++, 1)
  INCDEC_OPERATOR (operator --, -1)

#undef SHIFT_ASSIGNMENT_OPERATOR
#undef ASSIGNMENT_OPERATOR
#undef INCDEC_OPERATOR

  /* Debugging functions.  */
  void dump () const;

  static const bool is_sign_extended
    = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
};

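/* For example (illustrative only), the compound assignment operators
   defined above allow:

     offset_int x = 10;
     x += 6;   // wi::add: x is now 16
     x <<= 2;  // wi::lshift: x is now 64
     ++x;      // x is now 65  */
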
template <typename storage>
inline generic_wide_int <storage>::generic_wide_int () {}

template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x)
  : storage (x)
{
}

template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x,
                                                     unsigned int precision)
  : storage (x, precision)
{
}

/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION.
   If THIS does not fit in PRECISION, the information is lost.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_shwi (unsigned int precision) const
{
  if (precision < HOST_BITS_PER_WIDE_INT)
    return sext_hwi (this->get_val ()[0], precision);
  else
    return this->get_val ()[0];
}

/* Return THIS as a signed HOST_WIDE_INT, in its natural precision.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_shwi () const
{
  if (is_sign_extended)
    return this->get_val ()[0];
  else
    return to_shwi (this->get_precision ());
}

/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from
   PRECISION.  If THIS does not fit in PRECISION, the information
   is lost.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::to_uhwi (unsigned int precision) const
{
  if (precision < HOST_BITS_PER_WIDE_INT)
    return zext_hwi (this->get_val ()[0], precision);
  else
    return this->get_val ()[0];
}

/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::to_uhwi () const
{
  return to_uhwi (this->get_precision ());
}

/* TODO: The compiler is half converted from using HOST_WIDE_INT to
   represent addresses to using offset_int to represent addresses.
   We use to_short_addr at the interface from new code to old,
   unconverted code.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_short_addr () const
{
  return this->get_val ()[0];
}

/* Return the implicit value of blocks above get_len ().  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::sign_mask () const
{
  unsigned int len = this->get_len ();
  unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
  if (!is_sign_extended)
    {
      unsigned int precision = this->get_precision ();
      int excess = len * HOST_BITS_PER_WIDE_INT - precision;
      if (excess > 0)
        high <<= excess;
    }
  return (HOST_WIDE_INT) (high) < 0 ? -1 : 0;
}

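/* For example, the 4-bit value 0x8 is stored sign-extended as
   VAL = { 0xf...fff8 }, whose top block is negative, so sign_mask ()
   returns -1; the 4-bit value 0x7 is stored as VAL = { 0x7 } and
   yields 0.  */
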
/* Return the signed value of the least-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::slow () const
{
  return this->get_val ()[0];
}

/* Return the signed value of the most-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::shigh () const
{
  return this->get_val ()[this->get_len () - 1];
}

/* Return the unsigned value of the least-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::ulow () const
{
  return this->get_val ()[0];
}

/* Return the unsigned value of the most-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::uhigh () const
{
  return this->get_val ()[this->get_len () - 1];
}

/* Return block I, which might be implicitly or explicitly encoded.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::elt (unsigned int i) const
{
  if (i >= this->get_len ())
    return sign_mask ();
  else
    return this->get_val ()[i];
}

template <typename storage>
template <typename T>
inline generic_wide_int <storage> &
generic_wide_int <storage>::operator = (const T &x)
{
  storage::operator = (x);
  return *this;
}

/* Dump the contents of the integer to stderr, for debugging.  */
template <typename storage>
void
generic_wide_int <storage>::dump () const
{
  unsigned int len = this->get_len ();
  const HOST_WIDE_INT *val = this->get_val ();
  unsigned int precision = this->get_precision ();
  fprintf (stderr, "[");
  if (len * HOST_BITS_PER_WIDE_INT < precision)
    fprintf (stderr, "...,");
  for (unsigned int i = 0; i < len - 1; ++i)
    fprintf (stderr, HOST_WIDE_INT_PRINT_HEX ",", val[len - 1 - i]);
  fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "], precision = %d\n",
           val[0], precision);
}

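/* dump is normally called by hand from a debugger, e.g. from gdb:

     (gdb) call x.dump ()

   The blocks are printed most-significant first.  */
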
namespace wi
{
  template <typename storage>
  struct int_traits < generic_wide_int <storage> >
    : public wi::int_traits <storage>
  {
    static unsigned int get_precision (const generic_wide_int <storage> &);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
                                      const generic_wide_int <storage> &);
  };
}

template <typename storage>
inline unsigned int
wi::int_traits < generic_wide_int <storage> >::
get_precision (const generic_wide_int <storage> &x)
{
  return x.get_precision ();
}

template <typename storage>
inline wi::storage_ref
wi::int_traits < generic_wide_int <storage> >::
decompose (HOST_WIDE_INT *, unsigned int precision,
           const generic_wide_int <storage> &x)
{
  gcc_checking_assert (precision == x.get_precision ());
  return wi::storage_ref (x.get_val (), x.get_len (), precision);
}

/* Provide the storage for a wide_int_ref.  This acts like a read-only
   wide_int, with the optimization that VAL is normally a pointer to
   another integer's storage, so that no array copy is needed.  */
template <bool SE, bool HDP>
struct wide_int_ref_storage : public wi::storage_ref
{
private:
  /* Scratch space that can be used when decomposing the original integer.
     It must live as long as this object.  */
  HOST_WIDE_INT scratch[2];

public:
  wide_int_ref_storage () {}

  wide_int_ref_storage (const wi::storage_ref &);

  template <typename T>
  wide_int_ref_storage (const T &);

  template <typename T>
  wide_int_ref_storage (const T &, unsigned int);
};

/* Create a reference from an existing reference.  */
template <bool SE, bool HDP>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const wi::storage_ref &x)
  : storage_ref (x)
{}

/* Create a reference to integer X in its natural precision.  Note
   that the natural precision is host-dependent for primitive
   types.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
  : storage_ref (wi::int_traits <T>::decompose (scratch,
                                                wi::get_precision (x), x))
{
}

/* Create a reference to integer X in precision PRECISION.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const T &x, unsigned int precision)
  : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
{
}

namespace wi
{
  template <bool SE, bool HDP>
  struct int_traits <wide_int_ref_storage <SE, HDP> >
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    static const bool host_dependent_precision = HDP;
    static const bool is_sign_extended = SE;
  };
}

namespace wi
{
  unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                              unsigned int, unsigned int, unsigned int,
                              signop sgn);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                           unsigned int, unsigned int, bool = true);
}

/* The storage used by wide_int.  */
class GTY(()) wide_int_storage
{
private:
  HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
  unsigned int len;
  unsigned int precision;

public:
  wide_int_storage ();
  template <typename T>
  wide_int_storage (const T &);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  unsigned int get_len () const;
  HOST_WIDE_INT *write_val ();
  void set_len (unsigned int, bool = false);

  template <typename T>
  wide_int_storage &operator = (const T &);

  static wide_int from (const wide_int_ref &, unsigned int, signop);
  static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
                              unsigned int, bool = true);
  static wide_int create (unsigned int);

  /* FIXME: target-dependent, so should disappear.  */
  wide_int bswap () const;
};

namespace wi
{
  template <>
  struct int_traits <wide_int_storage>
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    /* Guaranteed by a static assert in the wide_int_storage constructor.  */
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    template <typename T1, typename T2>
    static wide_int get_binary_result (const T1 &, const T2 &);
  };
}

inline wide_int_storage::wide_int_storage () {}

/* Initialize the storage from integer X, in its natural precision.
   Note that we do not allow integers with host-dependent precision
   to become wide_ints; wide_ints must always be logically independent
   of the host.  */
template <typename T>
inline wide_int_storage::wide_int_storage (const T &x)
{
  { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
  { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
  WIDE_INT_REF_FOR (T) xi (x);
  precision = xi.precision;
  wi::copy (*this, xi);
}

template <typename T>
inline wide_int_storage&
wide_int_storage::operator = (const T &x)
{
  { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
  { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
  WIDE_INT_REF_FOR (T) xi (x);
  precision = xi.precision;
  wi::copy (*this, xi);
  return *this;
}

  935. inline unsigned int
  936. wide_int_storage::get_precision () const
  937. {
  938. return precision;
  939. }
  940. inline const HOST_WIDE_INT *
  941. wide_int_storage::get_val () const
  942. {
  943. return val;
  944. }
  945. inline unsigned int
  946. wide_int_storage::get_len () const
  947. {
  948. return len;
  949. }
  950. inline HOST_WIDE_INT *
  951. wide_int_storage::write_val ()
  952. {
  953. return val;
  954. }
  955. inline void
  956. wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
  957. {
  958. len = l;
  959. if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
  960. val[len - 1] = sext_hwi (val[len - 1],
  961. precision % HOST_BITS_PER_WIDE_INT);
  962. }
  963. /* Treat X as having signedness SGN and convert it to a PRECISION-bit
  964. number. */
  965. inline wide_int
  966. wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
  967. signop sgn)
  968. {
  969. wide_int result = wide_int::create (precision);
  970. result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
  971. x.precision, precision, sgn));
  972. return result;
  973. }
  974. /* Create a wide_int from the explicit block encoding given by VAL and
  975. LEN. PRECISION is the precision of the integer. NEED_CANON_P is
  976. true if the encoding may have redundant trailing blocks. */
  977. inline wide_int
  978. wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
  979. unsigned int precision, bool need_canon_p)
  980. {
  981. wide_int result = wide_int::create (precision);
  982. result.set_len (wi::from_array (result.write_val (), val, len, precision,
  983. need_canon_p));
  984. return result;
  985. }
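
/* Usage sketch (editorial, not part of the original header): building a
   wide_int from an explicit block encoding and then resizing it.
   Assumes a 64-bit HOST_WIDE_INT host.

     HOST_WIDE_INT blocks[2] = { -1, 0 };
     // A 128-bit value whose low 64 bits are set and high 64 bits clear.
     wide_int x = wide_int::from_array (blocks, 2, 128);
     // Narrow it to 32 bits, treating the source as unsigned.
     wide_int y = wide_int::from (x, 32, UNSIGNED);  */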

/* Return an uninitialized wide_int with precision PRECISION.  */
inline wide_int
wide_int_storage::create (unsigned int precision)
{
  wide_int x;
  x.precision = precision;
  return x;
}

template <typename T1, typename T2>
inline wide_int
wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
{
  /* This shouldn't be used for two flexible-precision inputs.  */
  STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
		 || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
  if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
    return wide_int::create (wi::get_precision (y));
  else
    return wide_int::create (wi::get_precision (x));
}

/* The storage used by FIXED_WIDE_INT (N).  */
template <int N>
class GTY(()) fixed_wide_int_storage
{
private:
  HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT];
  unsigned int len;

public:
  fixed_wide_int_storage ();
  template <typename T>
  fixed_wide_int_storage (const T &);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  unsigned int get_len () const;
  HOST_WIDE_INT *write_val ();
  void set_len (unsigned int, bool = false);

  static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
  static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
					bool = true);
};

namespace wi
{
  template <int N>
  struct int_traits < fixed_wide_int_storage <N> >
  {
    static const enum precision_type precision_type = CONST_PRECISION;
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static const unsigned int precision = N;
    template <typename T1, typename T2>
    static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
  };
}

template <int N>
inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}

/* Initialize the storage from integer X, in precision N.  */
template <int N>
template <typename T>
inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
{
  /* Check for type compatibility.  We don't want to initialize a
     fixed-width integer from something like a wide_int.  */
  WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
  wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
}

template <int N>
inline unsigned int
fixed_wide_int_storage <N>::get_precision () const
{
  return N;
}

template <int N>
inline const HOST_WIDE_INT *
fixed_wide_int_storage <N>::get_val () const
{
  return val;
}

template <int N>
inline unsigned int
fixed_wide_int_storage <N>::get_len () const
{
  return len;
}

template <int N>
inline HOST_WIDE_INT *
fixed_wide_int_storage <N>::write_val ()
{
  return val;
}

template <int N>
inline void
fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
{
  len = l;
  /* There are no excess bits in val[len - 1].  */
  STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
}

/* Treat X as having signedness SGN and convert it to an N-bit number.  */
template <int N>
inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
{
  FIXED_WIDE_INT (N) result;
  result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
				     x.precision, N, sgn));
  return result;
}

/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
   VAL and LEN.  NEED_CANON_P is true if the encoding may have redundant
   trailing blocks.  */
template <int N>
inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
					unsigned int len,
					bool need_canon_p)
{
  FIXED_WIDE_INT (N) result;
  result.set_len (wi::from_array (result.write_val (), val, len,
				  N, need_canon_p));
  return result;
}
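
/* Usage sketch (editorial): converting a variable-precision value to a
   fixed-precision type such as offset_int, which is a FIXED_WIDE_INT.
   The values below are illustrative.

     wide_int w = wi::shwi (-42, 16);
     // Sign-extend W to the fixed precision of offset_int.
     offset_int off = offset_int::from (w, SIGNED);  */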

template <int N>
template <typename T1, typename T2>
inline FIXED_WIDE_INT (N)
wi::int_traits < fixed_wide_int_storage <N> >::
get_binary_result (const T1 &, const T2 &)
{
  return FIXED_WIDE_INT (N) ();
}

/* A reference to one element of a trailing_wide_ints structure.  */
class trailing_wide_int_storage
{
private:
  /* The precision of the integer, which is a fixed property of the
     parent trailing_wide_ints.  */
  unsigned int m_precision;

  /* A pointer to the length field.  */
  unsigned char *m_len;

  /* A pointer to the HWI array.  There are enough elements to hold all
     values of precision M_PRECISION.  */
  HOST_WIDE_INT *m_val;

public:
  trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_len () const;
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  HOST_WIDE_INT *write_val ();
  void set_len (unsigned int, bool = false);

  template <typename T>
  trailing_wide_int_storage &operator = (const T &);
};

typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;

/* trailing_wide_int behaves like a wide_int.  */
namespace wi
{
  template <>
  struct int_traits <trailing_wide_int_storage>
    : public int_traits <wide_int_storage> {};
}

/* An array of N wide_int-like objects that can be put at the end of
   a variable-sized structure.  Use extra_size to calculate how many
   bytes beyond the sizeof need to be allocated.  Use set_precision
   to initialize the structure.  */
template <int N>
class GTY((user)) trailing_wide_ints
{
private:
  /* The shared precision of each number.  */
  unsigned short m_precision;

  /* The shared maximum length of each number.  */
  unsigned char m_max_len;

  /* The current length of each number.  */
  unsigned char m_len[N];

  /* The variable-length part of the structure, which always contains
     at least one HWI.  Element I starts at index I * M_MAX_LEN.  */
  HOST_WIDE_INT m_val[1];

public:
  typedef WIDE_INT_REF_FOR (trailing_wide_int_storage) const_reference;

  void set_precision (unsigned int);
  unsigned int get_precision () const { return m_precision; }
  trailing_wide_int operator [] (unsigned int);
  const_reference operator [] (unsigned int) const;
  static size_t extra_size (unsigned int);
  size_t extra_size () const { return extra_size (m_precision); }
};

inline trailing_wide_int_storage::
trailing_wide_int_storage (unsigned int precision, unsigned char *len,
			   HOST_WIDE_INT *val)
  : m_precision (precision), m_len (len), m_val (val)
{
}

inline unsigned int
trailing_wide_int_storage::get_len () const
{
  return *m_len;
}

inline unsigned int
trailing_wide_int_storage::get_precision () const
{
  return m_precision;
}

inline const HOST_WIDE_INT *
trailing_wide_int_storage::get_val () const
{
  return m_val;
}

inline HOST_WIDE_INT *
trailing_wide_int_storage::write_val ()
{
  return m_val;
}

inline void
trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
{
  *m_len = len;
  if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision)
    m_val[len - 1] = sext_hwi (m_val[len - 1],
			       m_precision % HOST_BITS_PER_WIDE_INT);
}

template <typename T>
inline trailing_wide_int_storage &
trailing_wide_int_storage::operator = (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x, m_precision);
  wi::copy (*this, xi);
  return *this;
}

/* Initialize the structure and record that all elements have precision
   PRECISION.  */
template <int N>
inline void
trailing_wide_ints <N>::set_precision (unsigned int precision)
{
  m_precision = precision;
  m_max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
	       / HOST_BITS_PER_WIDE_INT);
}

/* Return a reference to element INDEX.  */
template <int N>
inline trailing_wide_int
trailing_wide_ints <N>::operator [] (unsigned int index)
{
  return trailing_wide_int_storage (m_precision, &m_len[index],
				    &m_val[index * m_max_len]);
}

template <int N>
inline typename trailing_wide_ints <N>::const_reference
trailing_wide_ints <N>::operator [] (unsigned int index) const
{
  return wi::storage_ref (&m_val[index * m_max_len],
			  m_len[index], m_precision);
}

/* Return how many extra bytes need to be added to the end of the structure
   in order to handle N wide_ints of precision PRECISION.  */
template <int N>
inline size_t
trailing_wide_ints <N>::extra_size (unsigned int precision)
{
  unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
			  / HOST_BITS_PER_WIDE_INT);
  return (N * max_len - 1) * sizeof (HOST_WIDE_INT);
}
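
/* Usage sketch (editorial): allocating a variable-sized structure that
   ends in a trailing_wide_ints.  The structure and the variable PREC
   are hypothetical; only extra_size and set_precision come from this
   header.

     struct hypothetical_node
     {
       unsigned int id;
       trailing_wide_ints <2> bounds;
     };

     size_t size = (sizeof (hypothetical_node)
		    + trailing_wide_ints <2>::extra_size (prec));
     hypothetical_node *p = (hypothetical_node *) xmalloc (size);
     p->bounds.set_precision (prec);
     p->bounds[0] = wi::zero (prec);
     p->bounds[1] = wi::minus_one (prec);  */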

/* This macro is used in structures that end with a trailing_wide_ints field
   called FIELD.  It declares get_NAME () and set_NAME () methods to access
   element I of FIELD.  */
#define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I) \
  trailing_wide_int get_##NAME () { return FIELD[I]; } \
  template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
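
/* Usage sketch (editorial): a hypothetical structure using the accessor
   macro.  The get_min ()/set_min () and get_max ()/set_max () methods
   below are generated by the macro expansions.

     struct hypothetical_range
     {
       trailing_wide_ints <2> ints;
       TRAILING_WIDE_INT_ACCESSOR (min, ints, 0)
       TRAILING_WIDE_INT_ACCESSOR (max, ints, 1)
     };  */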

namespace wi
{
  /* Implementation of int_traits for primitive integer types like "int".  */
  template <typename T, bool signed_p>
  struct primitive_int_traits
  {
    static const enum precision_type precision_type = FLEXIBLE_PRECISION;
    static const bool host_dependent_precision = true;
    static const bool is_sign_extended = true;
    static unsigned int get_precision (T);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
  };
}

template <typename T, bool signed_p>
inline unsigned int
wi::primitive_int_traits <T, signed_p>::get_precision (T)
{
  return sizeof (T) * CHAR_BIT;
}

template <typename T, bool signed_p>
inline wi::storage_ref
wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch,
						   unsigned int precision, T x)
{
  scratch[0] = x;
  if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
    return wi::storage_ref (scratch, 1, precision);
  scratch[1] = 0;
  return wi::storage_ref (scratch, 2, precision);
}
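
/* Note on the second block above (editorial): an unsigned primitive whose
   top bit is set would read as negative if stored as a single
   sign-extended HWI at a precision wider than the HWI, so a zero block
   is appended to keep the value non-negative.  For example, on a 64-bit
   host:

     unsigned long x = 0x8000000000000000UL;
     // Decomposed at 128-bit precision: scratch[0] = x, scratch[1] = 0,
     // len = 2, so the value stays 2^63 rather than -2^63.  */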

/* Allow primitive C types to be used in wi:: routines.  */
namespace wi
{
  template <>
  struct int_traits <unsigned char>
    : public primitive_int_traits <unsigned char, false> {};

  template <>
  struct int_traits <unsigned short>
    : public primitive_int_traits <unsigned short, false> {};

  template <>
  struct int_traits <int>
    : public primitive_int_traits <int, true> {};

  template <>
  struct int_traits <unsigned int>
    : public primitive_int_traits <unsigned int, false> {};

  template <>
  struct int_traits <long>
    : public primitive_int_traits <long, true> {};

  template <>
  struct int_traits <unsigned long>
    : public primitive_int_traits <unsigned long, false> {};

#if defined HAVE_LONG_LONG
  template <>
  struct int_traits <long long>
    : public primitive_int_traits <long long, true> {};

  template <>
  struct int_traits <unsigned long long>
    : public primitive_int_traits <unsigned long long, false> {};
#endif
}

namespace wi
{
  /* Stores HWI-sized integer VAL, treating it as having signedness SGN
     and precision PRECISION.  */
  struct hwi_with_prec
  {
    hwi_with_prec () {}
    hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
    HOST_WIDE_INT val;
    unsigned int precision;
    signop sgn;
  };

  hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
  hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);

  hwi_with_prec minus_one (unsigned int);
  hwi_with_prec zero (unsigned int);
  hwi_with_prec one (unsigned int);
  hwi_with_prec two (unsigned int);
}

inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p,
					 signop s)
  : precision (p), sgn (s)
{
  if (precision < HOST_BITS_PER_WIDE_INT)
    val = sext_hwi (v, precision);
  else
    val = v;
}

/* Return a signed integer that has value VAL and precision PRECISION.  */
inline wi::hwi_with_prec
wi::shwi (HOST_WIDE_INT val, unsigned int precision)
{
  return hwi_with_prec (val, precision, SIGNED);
}

/* Return an unsigned integer that has value VAL and precision PRECISION.  */
inline wi::hwi_with_prec
wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
{
  return hwi_with_prec (val, precision, UNSIGNED);
}

/* Return a wide int of -1 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::minus_one (unsigned int precision)
{
  return wi::shwi (-1, precision);
}

/* Return a wide int of 0 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::zero (unsigned int precision)
{
  return wi::shwi (0, precision);
}

/* Return a wide int of 1 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::one (unsigned int precision)
{
  return wi::shwi (1, precision);
}

/* Return a wide int of 2 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::two (unsigned int precision)
{
  return wi::shwi (2, precision);
}
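
/* Usage sketch (editorial): these helpers attach an explicit precision
   to a plain HWI so that it can be mixed with wide_ints.

     wide_int a = wi::shwi (-1, 32);	// 32-bit all-ones, value -1
     wide_int b = wi::uhwi (5, 32);	// 32-bit 5
     wide_int c = wi::zero (128);	// 128-bit 0  */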

namespace wi
{
  /* ints_for<T>::zero (X) returns a zero that, when assigned to a T,
     gives that T the same precision as X.  */
  template<typename T, precision_type = int_traits<T>::precision_type>
  struct ints_for
  {
    static int zero (const T &) { return 0; }
  };

  template<typename T>
  struct ints_for<T, VAR_PRECISION>
  {
    static hwi_with_prec zero (const T &);
  };
}

template<typename T>
inline wi::hwi_with_prec
wi::ints_for<T, wi::VAR_PRECISION>::zero (const T &x)
{
  return wi::zero (wi::get_precision (x));
}

namespace wi
{
  template <>
  struct int_traits <wi::hwi_with_prec>
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    /* hwi_with_prec has an explicitly-given precision, rather than the
       precision of HOST_WIDE_INT.  */
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static unsigned int get_precision (const wi::hwi_with_prec &);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
				      const wi::hwi_with_prec &);
  };
}

inline unsigned int
wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
{
  return x.precision;
}

inline wi::storage_ref
wi::int_traits <wi::hwi_with_prec>::
decompose (HOST_WIDE_INT *scratch, unsigned int precision,
	   const wi::hwi_with_prec &x)
{
  gcc_checking_assert (precision == x.precision);
  scratch[0] = x.val;
  if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
    return wi::storage_ref (scratch, 1, precision);
  scratch[1] = 0;
  return wi::storage_ref (scratch, 2, precision);
}

/* Private functions for handling large cases out of line.  They take
   individual length and array parameters because that is cheaper for
   the inline caller than constructing an object on the stack and
   passing a reference to it.  (Although many callers use wide_int_refs,
   we generally want those to be removed by SRA.)  */
namespace wi
{
  bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
		   const HOST_WIDE_INT *, unsigned int, unsigned int);
  bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		    const HOST_WIDE_INT *, unsigned int);
  bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		    const HOST_WIDE_INT *, unsigned int);
  int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		  const HOST_WIDE_INT *, unsigned int);
  int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		  const HOST_WIDE_INT *, unsigned int);
  unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, unsigned int);
  unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, unsigned int);
  unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int);
  unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			     unsigned int, unsigned int, unsigned int);
  unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int,
			      unsigned int);
  unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int,
			      unsigned int);
  unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int);
  unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, const HOST_WIDE_INT *,
			      unsigned int, unsigned int);
  unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			 const HOST_WIDE_INT *, unsigned int, unsigned int);
  unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			     unsigned int, const HOST_WIDE_INT *,
			     unsigned int, unsigned int);
  unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int);
  unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int,
			  signop, overflow_type *);
  unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int,
			  signop, overflow_type *);
  unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			     unsigned int, const HOST_WIDE_INT *,
			     unsigned int, unsigned int, signop,
			     overflow_type *, bool);
  unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
				HOST_WIDE_INT *, const HOST_WIDE_INT *,
				unsigned int, unsigned int,
				const HOST_WIDE_INT *,
				unsigned int, unsigned int,
				signop, overflow_type *);
}

/* Return the number of bits that integer X can hold.  */
template <typename T>
inline unsigned int
wi::get_precision (const T &x)
{
  return wi::int_traits <T>::get_precision (x);
}

/* Return the number of bits that the result of a binary operation can
   hold when the input operands are X and Y.  */
template <typename T1, typename T2>
inline unsigned int
wi::get_binary_precision (const T1 &x, const T2 &y)
{
  return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
			get_binary_result (x, y));
}

/* Copy the contents of Y to X, but keeping X's current precision.  */
template <typename T1, typename T2>
inline void
wi::copy (T1 &x, const T2 &y)
{
  HOST_WIDE_INT *xval = x.write_val ();
  const HOST_WIDE_INT *yval = y.get_val ();
  unsigned int len = y.get_len ();
  unsigned int i = 0;
  do
    xval[i] = yval[i];
  while (++i < len);
  x.set_len (len, y.is_sign_extended);
}

/* Return true if X fits in a HOST_WIDE_INT with no loss of precision.  */
template <typename T>
inline bool
wi::fits_shwi_p (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  return xi.len == 1;
}

/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
   precision.  */
template <typename T>
inline bool
wi::fits_uhwi_p (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  if (xi.precision <= HOST_BITS_PER_WIDE_INT)
    return true;
  if (xi.len == 1)
    return xi.slow () >= 0;
  return xi.len == 2 && xi.uhigh () == 0;
}
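
/* Example (editorial): the difference between the two predicates for the
   128-bit value 2^64 - 1 on a 64-bit host.

     HOST_WIDE_INT blocks[2] = { -1, 0 };
     wide_int x = wide_int::from_array (blocks, 2, 128);
     // fits_shwi_p (x) is false: the encoding needs two blocks.
     // fits_uhwi_p (x) is true: len == 2 with a zero high block.  */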

/* Return true if X is negative based on the interpretation of SGN.
   For UNSIGNED, this is always false.  */
template <typename T>
inline bool
wi::neg_p (const T &x, signop sgn)
{
  WIDE_INT_REF_FOR (T) xi (x);
  if (sgn == UNSIGNED)
    return false;
  return xi.sign_mask () < 0;
}

/* Return -1 if the top bit of X is set and 0 if the top bit is clear.  */
template <typename T>
inline HOST_WIDE_INT
wi::sign_mask (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  return xi.sign_mask ();
}

/* Return true if X == Y.  X and Y must be binary-compatible.  */
template <typename T1, typename T2>
inline bool
wi::eq_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (xi.is_sign_extended && yi.is_sign_extended)
    {
      /* This case reduces to array equality.  */
      if (xi.len != yi.len)
	return false;
      unsigned int i = 0;
      do
	if (xi.val[i] != yi.val[i])
	  return false;
      while (++i != xi.len);
      return true;
    }
  if (__builtin_expect (yi.len == 1, true))
    {
      /* XI is only equal to YI if it too has a single HWI.  */
      if (xi.len != 1)
	return false;
      /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
	 with 0 are simple.  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return xi.val[0] == 0;
      /* Otherwise flush out any excess bits first.  */
      unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
      int excess = HOST_BITS_PER_WIDE_INT - precision;
      if (excess > 0)
	diff <<= excess;
      return diff == 0;
    }
  return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
}

/* Return true if X != Y.  X and Y must be binary-compatible.  */
template <typename T1, typename T2>
inline bool
wi::ne_p (const T1 &x, const T2 &y)
{
  return !eq_p (x, y);
}

/* Return true if X < Y when both are treated as signed values.  */
template <typename T1, typename T2>
inline bool
wi::lts_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* We optimize x < y, where y is 64 or fewer bits.  */
  if (wi::fits_shwi_p (yi))
    {
      /* Make lts_p (x, 0) as efficient as wi::neg_p (x).  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return neg_p (xi);
      /* If x fits directly into a shwi, we can compare directly.  */
      if (wi::fits_shwi_p (xi))
	return xi.to_shwi () < yi.to_shwi ();
      /* If x doesn't fit and is negative, then it must be more
	 negative than any value in y, and hence smaller than y.  */
      if (neg_p (xi))
	return true;
      /* If x is positive, then it must be larger than any value in y,
	 and hence greater than y.  */
      return false;
    }
  /* Optimize the opposite case, if it can be detected at compile time.  */
  if (STATIC_CONSTANT_P (xi.len == 1))
    /* If YI is negative it is lower than the least HWI.
       If YI is positive it is greater than the greatest HWI.  */
    return !neg_p (yi);
  return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}

/* Return true if X < Y when both are treated as unsigned values.  */
template <typename T1, typename T2>
inline bool
wi::ltu_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* Optimize comparisons with constants.  */
  if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
    return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INT) yi.val[0];
  if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
    return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INT) xi.val[0];
  /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
     for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending
     both values does not change the result.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl;
    }
  return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}

/* Return true if X < Y.  Signedness of X and Y is indicated by SGN.  */
template <typename T1, typename T2>
inline bool
wi::lt_p (const T1 &x, const T2 &y, signop sgn)
{
  if (sgn == SIGNED)
    return lts_p (x, y);
  else
    return ltu_p (x, y);
}
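
/* Example (editorial): the same bit pattern compares differently under
   the signed and unsigned orders.

     wide_int x = wi::shwi (-1, 32);	// bit pattern 0xffffffff
     wide_int y = wi::uhwi (5, 32);
     // wi::lts_p (x, y) is true:  -1 < 5 as signed values.
     // wi::ltu_p (x, y) is false: 0xffffffff > 5 as unsigned values.
     // wi::lt_p (x, y, sgn) selects between the two at run time.  */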

/* Return true if X <= Y when both are treated as signed values.  */
template <typename T1, typename T2>
inline bool
wi::les_p (const T1 &x, const T2 &y)
{
  return !lts_p (y, x);
}

/* Return true if X <= Y when both are treated as unsigned values.  */
template <typename T1, typename T2>
inline bool
wi::leu_p (const T1 &x, const T2 &y)
{
  return !ltu_p (y, x);
}

/* Return true if X <= Y.  Signedness of X and Y is indicated by SGN.  */
template <typename T1, typename T2>
inline bool
wi::le_p (const T1 &x, const T2 &y, signop sgn)
{
  if (sgn == SIGNED)
    return les_p (x, y);
  else
    return leu_p (x, y);
}

/* Return true if X > Y when both are treated as signed values.  */
template <typename T1, typename T2>
inline bool
wi::gts_p (const T1 &x, const T2 &y)
{
  return lts_p (y, x);
}

/* Return true if X > Y when both are treated as unsigned values.  */
template <typename T1, typename T2>
inline bool
wi::gtu_p (const T1 &x, const T2 &y)
{
  return ltu_p (y, x);
}

/* Return true if X > Y.  Signedness of X and Y is indicated by SGN.  */
template <typename T1, typename T2>
inline bool
wi::gt_p (const T1 &x, const T2 &y, signop sgn)
{
  if (sgn == SIGNED)
    return gts_p (x, y);
  else
    return gtu_p (x, y);
}

/* Return true if X >= Y when both are treated as signed values.  */
template <typename T1, typename T2>
inline bool
wi::ges_p (const T1 &x, const T2 &y)
{
  return !lts_p (x, y);
}

/* Return true if X >= Y when both are treated as unsigned values.  */
template <typename T1, typename T2>
inline bool
wi::geu_p (const T1 &x, const T2 &y)
{
  return !ltu_p (x, y);
}

/* Return true if X >= Y.  Signedness of X and Y is indicated by SGN.  */
template <typename T1, typename T2>
inline bool
wi::ge_p (const T1 &x, const T2 &y, signop sgn)
{
  if (sgn == SIGNED)
    return ges_p (x, y);
  else
    return geu_p (x, y);
}

/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
   as signed values.  */
template <typename T1, typename T2>
inline int
wi::cmps (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (wi::fits_shwi_p (yi))
    {
      /* Special case for comparisons with 0.  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
      /* If x fits into a signed HWI, we can compare directly.  */
      if (wi::fits_shwi_p (xi))
	{
	  HOST_WIDE_INT xl = xi.to_shwi ();
	  HOST_WIDE_INT yl = yi.to_shwi ();
	  return xl < yl ? -1 : xl > yl;
	}
      /* If x doesn't fit and is negative, then it must be more
	 negative than any signed HWI, and hence smaller than y.  */
      if (neg_p (xi))
	return -1;
      /* If x is positive, then it must be larger than any signed HWI,
	 and hence greater than y.  */
      return 1;
    }
  /* Optimize the opposite case, if it can be detected at compile time.  */
  if (STATIC_CONSTANT_P (xi.len == 1))
    /* If YI is negative it is lower than the least HWI.
       If YI is positive it is greater than the greatest HWI.  */
    return neg_p (yi) ? 1 : -1;
  return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
}

/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
   as unsigned values.  */
template <typename T1, typename T2>
inline int
wi::cmpu (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* Optimize comparisons with constants.  */
  if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
    {
      /* If XI doesn't fit in a HWI then it must be larger than YI.  */
      if (xi.len != 1)
	return 1;
      /* Otherwise compare directly.  */
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.val[0];
      return xl < yl ? -1 : xl > yl;
    }
  if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
    {
      /* If YI doesn't fit in a HWI then it must be larger than XI.  */
      if (yi.len != 1)
	return -1;
      /* Otherwise compare directly.  */
      unsigned HOST_WIDE_INT xl = xi.val[0];
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl ? -1 : xl > yl;
    }
  /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
     for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending
     both values does not change the result.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl ? -1 : xl > yl;
    }
  return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
}

/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Signedness of
   X and Y is indicated by SGN.  */
template <typename T1, typename T2>
inline int
wi::cmp (const T1 &x, const T2 &y, signop sgn)
{
  if (sgn == SIGNED)
    return cmps (x, y);
  else
    return cmpu (x, y);
}

/* Return ~X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::bit_not (const T &x)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
  for (unsigned int i = 0; i < xi.len; ++i)
    val[i] = ~xi.val[i];
  result.set_len (xi.len);
  return result;
}

/* Return -X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::neg (const T &x)
{
  return sub (0, x);
}

/* Return -X.  Indicate in *OVERFLOW if performing the negation would
   cause an overflow.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::neg (const T &x, overflow_type *overflow)
{
  *overflow = only_sign_bit_p (x) ? OVF_OVERFLOW : OVF_NONE;
  return sub (0, x);
}

/* Return the absolute value of X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::abs (const T &x)
{
  return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T) (x);
}

/* Return the result of sign-extending the low OFFSET bits of X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::sext (const T &x, unsigned int offset)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);
  if (offset <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = sext_hwi (xi.ulow (), offset);
      result.set_len (1, true);
    }
  else
    result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
  return result;
}

/* Return the result of zero-extending the low OFFSET bits of X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::zext (const T &x, unsigned int offset)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);
  /* This is not just an optimization: it is actually required to
     maintain canonicalization.  */
  if (offset >= precision)
    {
      wi::copy (result, xi);
      return result;
    }
  /* In these cases we know that at least the top bit will be clear,
     so no sign extension is necessary.  */
  if (offset < HOST_BITS_PER_WIDE_INT)
    {
      val[0] = zext_hwi (xi.ulow (), offset);
      result.set_len (1, true);
    }
  else
    result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
  return result;
}

/* Return the result of extending the low OFFSET bits of X according to
   signedness SGN.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::ext (const T &x, unsigned int offset, signop sgn)
{
  return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
}
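
/* Example (editorial): extending the low 8 bits of a 32-bit value.

     wide_int x = wi::uhwi (0xfff0, 32);
     wide_int s = wi::sext (x, 8);   // low byte 0xf0 sign-extends to -16
     wide_int z = wi::zext (x, 8);   // low byte 0xf0 zero-extends to 240
     // wi::ext (x, 8, sgn) selects between the two.  */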

/* Return an integer that represents X | (1 << BIT).  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::set_bit (const T &x, unsigned int bit)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit);
      result.set_len (1);
    }
  else
    result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
  return result;
}
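
/* Example (editorial): setting the sign bit of a 32-bit zero.

     wide_int x = wi::zero (32);
     wide_int y = wi::set_bit (x, 31);
     // y has the bit pattern 0x80000000, i.e. -2^31 as a signed value.  */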

/* Return the minimum of X and Y, treating them both as having
   signedness SGN.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::min (const T1 &x, const T2 &y, signop sgn)
{
  WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  if (wi::le_p (x, y, sgn))
    wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
  else
    wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
  return result;
}

/* Return the minimum of X and Y, treating both as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smin (const T1 &x, const T2 &y)
{
  return wi::min (x, y, SIGNED);
}

/* Return the minimum of X and Y, treating both as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umin (const T1 &x, const T2 &y)
{
  return wi::min (x, y, UNSIGNED);
}

/* Return the maximum of X and Y, treating them both as having
   signedness SGN.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::max (const T1 &x, const T2 &y, signop sgn)
{
  WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  if (wi::ge_p (x, y, sgn))
    wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
  else
    wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
  return result;
}

/* Return the maximum of X and Y, treating both as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smax (const T1 &x, const T2 &y)
{
  return wi::max (x, y, SIGNED);
}

/* Return the maximum of X and Y, treating both as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umax (const T1 &x, const T2 &y)
{
  return wi::max (x, y, UNSIGNED);
}

/* Return X & Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_and (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () & yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
			       precision), is_sign_extended);
  return result;
}

/* Return X & ~Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_and_not (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () & ~yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
				   precision), is_sign_extended);
  return result;
}

/* Return X | Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_or (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () | yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (or_large (val, xi.val, xi.len,
			      yi.val, yi.len, precision), is_sign_extended);
  return result;
}

/* Return X | ~Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_or_not (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () | ~yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
				  precision), is_sign_extended);
  return result;
}

/* Return X ^ Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_xor (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () ^ yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (xor_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision), is_sign_extended);
  return result;
}

/* Return X + Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::add (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () + yi.ulow ();
      result.set_len (1);
    }
  /* If the precision is known at compile time to be greater than
     HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
     knowing that (a) all bits in those HWIs are significant and
     (b) the result has room for at least two HWIs.  This provides
     a fast path for things like offset_int and widest_int.
     The STATIC_CONSTANT_P test prevents this path from being
     used for wide_ints.  wide_ints with precisions greater than
     HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
     point handling them inline.  */
  else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
	   && __builtin_expect (xi.len + yi.len == 2, true))
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl + yl;
      val[0] = resultl;
      val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
      result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
			   >> (HOST_BITS_PER_WIDE_INT - 1)));
    }
  else
    result.set_len (add_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       UNSIGNED, 0));
  return result;
}

/* Return X + Y.  Treat X and Y as having the signedness given by SGN
   and indicate in *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::add (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl + yl;
      if (sgn == SIGNED)
	{
	  if ((((resultl ^ xl) & (resultl ^ yl))
	       >> (precision - 1)) & 1)
	    {
	      if (xl > resultl)
		*overflow = OVF_UNDERFLOW;
	      else if (xl < resultl)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
		     < (xl << (HOST_BITS_PER_WIDE_INT - precision)))
		    ? OVF_OVERFLOW : OVF_NONE;
      val[0] = resultl;
      result.set_len (1);
    }
  else
    result.set_len (add_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       sgn, overflow));
  return result;
}
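
/* Note on the signed overflow test above (editorial): the sign bit of
   (resultl ^ xl) & (resultl ^ yl) is set exactly when the result's sign
   differs from the signs of both addends, which is the classic
   two's-complement overflow condition.  For example, with 8-bit
   precision:

     // 100 + 100 = 200, which wraps to -56 in 8 signed bits:
     wi::overflow_type ovf;
     wide_int r = wi::add (wi::shwi (100, 8), wi::shwi (100, 8),
			   SIGNED, &ovf);
     // ovf == wi::OVF_OVERFLOW; r has the 8-bit pattern 0xc8.  */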
  2178. /* Return X - Y. */
  2179. template <typename T1, typename T2>
  2180. inline WI_BINARY_RESULT (T1, T2)
  2181. wi::sub (const T1 &x, const T2 &y)
  2182. {
  2183. WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  2184. unsigned int precision = get_precision (result);
  2185. WIDE_INT_REF_FOR (T1) xi (x, precision);
  2186. WIDE_INT_REF_FOR (T2) yi (y, precision);
  2187. if (precision <= HOST_BITS_PER_WIDE_INT)
  2188. {
  2189. val[0] = xi.ulow () - yi.ulow ();
  2190. result.set_len (1);
  2191. }
  2192. /* If the precision is known at compile time to be greater than
  2193. HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
  2194. knowing that (a) all bits in those HWIs are significant and
  2195. (b) the result has room for at least two HWIs. This provides
  2196. a fast path for things like offset_int and widest_int.
  2197. The STATIC_CONSTANT_P test prevents this path from being
  2198. used for wide_ints. wide_ints with precisions greater than
  2199. HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
  2200. point handling them inline. */
  2201. else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
  2202. && __builtin_expect (xi.len + yi.len == 2, true))
  2203. {
  2204. unsigned HOST_WIDE_INT xl = xi.ulow ();
  2205. unsigned HOST_WIDE_INT yl = yi.ulow ();
  2206. unsigned HOST_WIDE_INT resultl = xl - yl;
  2207. val[0] = resultl;
  2208. val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
  2209. result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
  2210. >> (HOST_BITS_PER_WIDE_INT - 1)));
  2211. }
  2212. else
  2213. result.set_len (sub_large (val, xi.val, xi.len,
  2214. yi.val, yi.len, precision,
  2215. UNSIGNED, 0));
  2216. return result;
  2217. }
  2218. /* Return X - Y. Treat X and Y as having the signednes given by SGN
  2219. and indicate in *OVERFLOW whether the operation overflowed. */
  2220. template <typename T1, typename T2>
  2221. inline WI_BINARY_RESULT (T1, T2)
  2222. wi::sub (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
  2223. {
  2224. WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  2225. unsigned int precision = get_precision (result);
  2226. WIDE_INT_REF_FOR (T1) xi (x, precision);
  2227. WIDE_INT_REF_FOR (T2) yi (y, precision);
  2228. if (precision <= HOST_BITS_PER_WIDE_INT)
  2229. {
  2230. unsigned HOST_WIDE_INT xl = xi.ulow ();
  2231. unsigned HOST_WIDE_INT yl = yi.ulow ();
  2232. unsigned HOST_WIDE_INT resultl = xl - yl;
  2233. if (sgn == SIGNED)
  2234. {
  2235. if ((((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1)
  2236. {
  2237. if (xl > yl)
  2238. *overflow = OVF_UNDERFLOW;
  2239. else if (xl < yl)
  2240. *overflow = OVF_OVERFLOW;
  2241. else
  2242. *overflow = OVF_NONE;
  2243. }
  2244. else
  2245. *overflow = OVF_NONE;
  2246. }
  2247. else
  2248. *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
  2249. > (xl << (HOST_BITS_PER_WIDE_INT - precision)))
  2250. ? OVF_UNDERFLOW : OVF_NONE;
  2251. val[0] = resultl;
  2252. result.set_len (1);
  2253. }
  2254. else
  2255. result.set_len (sub_large (val, xi.val, xi.len,
  2256. yi.val, yi.len, precision,
  2257. sgn, overflow));
  2258. return result;
  2259. }
  2260. /* Return X * Y. */
  2261. template <typename T1, typename T2>
  2262. inline WI_BINARY_RESULT (T1, T2)
  2263. wi::mul (const T1 &x, const T2 &y)
  2264. {
  2265. WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  2266. unsigned int precision = get_precision (result);
  2267. WIDE_INT_REF_FOR (T1) xi (x, precision);
  2268. WIDE_INT_REF_FOR (T2) yi (y, precision);
  2269. if (precision <= HOST_BITS_PER_WIDE_INT)
  2270. {
  2271. val[0] = xi.ulow () * yi.ulow ();
  2272. result.set_len (1);
  2273. }
  2274. else
  2275. result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
  2276. precision, UNSIGNED, 0, false));
  2277. return result;
  2278. }
  2279. /* Return X * Y. Treat X and Y as having the signednes given by SGN
  2280. and indicate in *OVERFLOW whether the operation overflowed. */
  2281. template <typename T1, typename T2>
  2282. inline WI_BINARY_RESULT (T1, T2)
  2283. wi::mul (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
  2284. {
  2285. WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  2286. unsigned int precision = get_precision (result);
  2287. WIDE_INT_REF_FOR (T1) xi (x, precision);
  2288. WIDE_INT_REF_FOR (T2) yi (y, precision);
  2289. result.set_len (mul_internal (val, xi.val, xi.len,
  2290. yi.val, yi.len, precision,
  2291. sgn, overflow, false));
  2292. return result;
  2293. }
  2294. /* Return X * Y, treating both X and Y as signed values. Indicate in
  2295. *OVERFLOW whether the operation overflowed. */
  2296. template <typename T1, typename T2>
  2297. inline WI_BINARY_RESULT (T1, T2)
  2298. wi::smul (const T1 &x, const T2 &y, overflow_type *overflow)
  2299. {
  2300. return mul (x, y, SIGNED, overflow);
  2301. }
  2302. /* Return X * Y, treating both X and Y as unsigned values. Indicate in
  2303. *OVERFLOW if the result overflows. */
  2304. template <typename T1, typename T2>
  2305. inline WI_BINARY_RESULT (T1, T2)
  2306. wi::umul (const T1 &x, const T2 &y, overflow_type *overflow)
  2307. {
  2308. return mul (x, y, UNSIGNED, overflow);
  2309. }
  2310. /* Perform a widening multiplication of X and Y, extending the values
  2311. according to SGN, and return the high part of the result. */
  2312. template <typename T1, typename T2>
  2313. inline WI_BINARY_RESULT (T1, T2)
  2314. wi::mul_high (const T1 &x, const T2 &y, signop sgn)
  2315. {
  2316. WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  2317. unsigned int precision = get_precision (result);
  2318. WIDE_INT_REF_FOR (T1) xi (x, precision);
  2319. WIDE_INT_REF_FOR (T2) yi (y, precision);
  2320. result.set_len (mul_internal (val, xi.val, xi.len,
  2321. yi.val, yi.len, precision,
  2322. sgn, 0, true));
  2323. return result;
  2324. }
  2325. /* Return X / Y, rouding towards 0. Treat X and Y as having the
  2326. signedness given by SGN. Indicate in *OVERFLOW if the result
  2327. overflows. */
  2328. template <typename T1, typename T2>
  2329. inline WI_BINARY_RESULT (T1, T2)
  2330. wi::div_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
  2331. {
  2332. WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  2333. unsigned int precision = get_precision (quotient);
  2334. WIDE_INT_REF_FOR (T1) xi (x, precision);
  2335. WIDE_INT_REF_FOR (T2) yi (y);
  2336. quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
  2337. precision,
  2338. yi.val, yi.len, yi.precision,
  2339. sgn, overflow));
  2340. return quotient;
  2341. }
  2342. /* Return X / Y, rouding towards 0. Treat X and Y as signed values. */
  2343. template <typename T1, typename T2>
  2344. inline WI_BINARY_RESULT (T1, T2)
  2345. wi::sdiv_trunc (const T1 &x, const T2 &y)
  2346. {
  2347. return div_trunc (x, y, SIGNED);
  2348. }
  2349. /* Return X / Y, rouding towards 0. Treat X and Y as unsigned values. */
  2350. template <typename T1, typename T2>
  2351. inline WI_BINARY_RESULT (T1, T2)
  2352. wi::udiv_trunc (const T1 &x, const T2 &y)
  2353. {
  2354. return div_trunc (x, y, UNSIGNED);
  2355. }

/* Return X / Y, rounding towards -inf.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
                                     &remainder_len, remainder_val,
                                     xi.val, xi.len, precision,
                                     yi.val, yi.len, yi.precision, sgn,
                                     overflow));
  remainder.set_len (remainder_len);
  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
    return quotient - 1;
  return quotient;
}
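
/* Example (illustrative): truncating and floor division only differ
   when the operands have opposite signs and the division is inexact.
   With 16-bit operands:

     wi::sdiv_trunc (wi::shwi (-7, 16), wi::shwi (2, 16))   == -3
     wi::sdiv_floor (wi::shwi (-7, 16), wi::shwi (2, 16))   == -4

   since -3.5 truncates towards zero to -3 but floors to -4.  */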

/* Return X / Y, rounding towards -inf.  Treat X and Y as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sdiv_floor (const T1 &x, const T2 &y)
{
  return div_floor (x, y, SIGNED);
}

/* Return X / Y, rounding towards -inf.  Treat X and Y as unsigned values.  */
/* ??? Why do we have both this and udiv_trunc.  Aren't they the same?  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::udiv_floor (const T1 &x, const T2 &y)
{
  return div_floor (x, y, UNSIGNED);
}

/* Return X / Y, rounding towards +inf.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
                                     &remainder_len, remainder_val,
                                     xi.val, xi.len, precision,
                                     yi.val, yi.len, yi.precision, sgn,
                                     overflow));
  remainder.set_len (remainder_len);
  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
    return quotient + 1;
  return quotient;
}

/* Return X / Y, rounding towards +inf.  Treat X and Y as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::udiv_ceil (const T1 &x, const T2 &y)
{
  return div_ceil (x, y, UNSIGNED);
}
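
/* Example (illustrative): a common use of ceiling division is
   converting a size into a count of fixed-size blocks, rounding up:

     wi::udiv_ceil (wi::uhwi (10, 32), wi::uhwi (4, 32))   == 3

   i.e. three 4-byte blocks are needed to cover 10 bytes.  */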

/* Return X / Y, rounding towards nearest with ties away from zero.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the result overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
                                     &remainder_len, remainder_val,
                                     xi.val, xi.len, precision,
                                     yi.val, yi.len, yi.precision, sgn,
                                     overflow));
  remainder.set_len (remainder_len);

  if (remainder != 0)
    {
      if (sgn == SIGNED)
        {
          WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
          if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
            {
              if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
                return quotient - 1;
              else
                return quotient + 1;
            }
        }
      else
        {
          if (wi::geu_p (remainder, wi::sub (y, remainder)))
            return quotient + 1;
        }
    }
  return quotient;
}
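
/* Example (illustrative): ties are rounded away from zero, so with
   16-bit operands:

     wi::div_round (wi::shwi (7, 16), wi::shwi (2, 16), SIGNED)    == 4
     wi::div_round (wi::shwi (-7, 16), wi::shwi (2, 16), SIGNED)   == -4
     wi::div_round (wi::shwi (5, 16), wi::shwi (3, 16), SIGNED)    == 2

   the exact halves 3.5 and -3.5 move away from zero, while 5/3
   rounds to the nearest integer, 2.  */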

/* Return X / Y, rounding towards 0.  Treat X and Y as having the
   signedness given by SGN.  Store the remainder in *REMAINDER_PTR.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
                  WI_BINARY_RESULT (T1, T2) *remainder_ptr)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
                                     &remainder_len, remainder_val,
                                     xi.val, xi.len, precision,
                                     yi.val, yi.len, yi.precision, sgn, 0));
  remainder.set_len (remainder_len);

  *remainder_ptr = remainder;
  return quotient;
}

/* Compute the greatest common divisor of two numbers A and B using
   Euclid's algorithm.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::gcd (const T1 &a, const T2 &b, signop sgn)
{
  T1 x, y, z;

  x = wi::abs (a);
  y = wi::abs (b);

  while (gt_p (x, 0, sgn))
    {
      z = mod_trunc (y, x, sgn);
      y = x;
      x = z;
    }

  return y;
}
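
/* Example (illustrative):

     wi::gcd (wi::uhwi (12, 32), wi::uhwi (18, 32), UNSIGNED)   == 6

   The loop terminates because each MOD_TRUNC step strictly reduces X
   towards zero.  */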

/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (remainder);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  divmod_internal (0, &remainder_len, remainder_val,
                   xi.val, xi.len, precision,
                   yi.val, yi.len, yi.precision, sgn, overflow);
  remainder.set_len (remainder_len);

  return remainder;
}

/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smod_trunc (const T1 &x, const T2 &y)
{
  return mod_trunc (x, y, SIGNED);
}

/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umod_trunc (const T1 &x, const T2 &y)
{
  return mod_trunc (x, y, UNSIGNED);
}

/* Compute X / Y, rounding towards -inf, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
                                     &remainder_len, remainder_val,
                                     xi.val, xi.len, precision,
                                     yi.val, yi.len, yi.precision, sgn,
                                     overflow));
  remainder.set_len (remainder_len);

  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
    return remainder + y;
  return remainder;
}
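
/* Example (illustrative): the floor remainder follows the sign of the
   divisor, whereas the truncating remainder follows the sign of the
   dividend:

     wi::smod_trunc (wi::shwi (-7, 16), wi::shwi (2, 16))          == -1
     wi::mod_floor (wi::shwi (-7, 16), wi::shwi (2, 16), SIGNED)   == 1  */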

/* Compute X / Y, rounding towards -inf, and return the remainder.
   Treat X and Y as unsigned values.  */
/* ??? Why do we have both this and umod_trunc.  Aren't they the same?  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umod_floor (const T1 &x, const T2 &y)
{
  return mod_floor (x, y, UNSIGNED);
}

/* Compute X / Y, rounding towards +inf, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
                                     &remainder_len, remainder_val,
                                     xi.val, xi.len, precision,
                                     yi.val, yi.len, yi.precision, sgn,
                                     overflow));
  remainder.set_len (remainder_len);

  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
    return remainder - y;
  return remainder;
}

/* Compute X / Y, rounding towards nearest with ties away from zero,
   and return the remainder.  Treat X and Y as having the signedness
   given by SGN.  Indicate in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
                                     &remainder_len, remainder_val,
                                     xi.val, xi.len, precision,
                                     yi.val, yi.len, yi.precision, sgn,
                                     overflow));
  remainder.set_len (remainder_len);

  if (remainder != 0)
    {
      if (sgn == SIGNED)
        {
          WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
          if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
            {
              if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
                return remainder + y;
              else
                return remainder - y;
            }
        }
      else
        {
          if (wi::geu_p (remainder, wi::sub (y, remainder)))
            return remainder - y;
        }
    }
  return remainder;
}

/* Return true if X is a multiple of Y.  Treat X and Y as having the
   signedness given by SGN.  */
template <typename T1, typename T2>
inline bool
wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn)
{
  return wi::mod_trunc (x, y, sgn) == 0;
}

/* Return true if X is a multiple of Y, storing X / Y in *RES if so.
   Treat X and Y as having the signedness given by SGN.  */
template <typename T1, typename T2>
inline bool
wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
                   WI_BINARY_RESULT (T1, T2) *res)
{
  WI_BINARY_RESULT (T1, T2) remainder;
  WI_BINARY_RESULT (T1, T2) quotient
    = divmod_trunc (x, y, sgn, &remainder);
  if (remainder == 0)
    {
      *res = quotient;
      return true;
    }
  return false;
}
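
/* Example (an illustrative sketch; NBYTES and ELT_SIZE are
   hypothetical offset_int values):

     offset_int elt_count;
     if (wi::multiple_of_p (nbytes, elt_size, UNSIGNED, &elt_count))
       ... // here nbytes == elt_count * elt_size exactly

   This avoids a separate division when the quotient is also needed.  */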

/* Return X << Y.  Return 0 if Y is greater than or equal to
   the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::lshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, precision))
    {
      val[0] = 0;
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      /* For fixed-precision integers like offset_int and widest_int,
         handle the case where the shift value is constant and the
         result is a single nonnegative HWI (meaning that we don't
         need to worry about val[1]).  This is particularly common
         for converting a byte count to a bit count.

         For variable-precision integers like wide_int, handle HWI
         and sub-HWI integers inline.  */
      if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
          ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)
             && xi.len == 1
             && xi.val[0] <= (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT)
                                              HOST_WIDE_INT_MAX >> shift))
          : precision <= HOST_BITS_PER_WIDE_INT)
        {
          val[0] = xi.ulow () << shift;
          result.set_len (1);
        }
      else
        result.set_len (lshift_large (val, xi.val, xi.len,
                                      precision, shift));
    }
  return result;
}
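
/* Example (illustrative): converting a byte count to a bit count
   multiplies by 8, which hits the constant-shift fast path above:

     offset_int bits = wi::lshift (bytes, 3);   // bytes * 8

   where BYTES is a hypothetical offset_int byte count.  */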

/* Return X >> Y, using a logical shift.  Return 0 if Y is greater than
   or equal to the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::lrshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  WIDE_INT_REF_FOR (T1) xi (x);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, xi.precision))
    {
      val[0] = 0;
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      /* For fixed-precision integers like offset_int and widest_int,
         handle the case where the shift value is constant and the
         shifted value is a single nonnegative HWI (meaning that all
         bits above the HWI are zero).  This is particularly common
         for converting a bit count to a byte count.

         For variable-precision integers like wide_int, handle HWI
         and sub-HWI integers inline.  */
      if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
          ? (shift < HOST_BITS_PER_WIDE_INT
             && xi.len == 1
             && xi.val[0] >= 0)
          : xi.precision <= HOST_BITS_PER_WIDE_INT)
        {
          val[0] = xi.to_uhwi () >> shift;
          result.set_len (1);
        }
      else
        result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
                                       get_precision (result), shift));
    }
  return result;
}

/* Return X >> Y, using an arithmetic shift.  Return a sign mask if
   Y is greater than or equal to the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::arshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  WIDE_INT_REF_FOR (T1) xi (x);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, xi.precision))
    {
      val[0] = sign_mask (x);
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      if (xi.precision <= HOST_BITS_PER_WIDE_INT)
        {
          val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
          result.set_len (1, true);
        }
      else
        result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
                                       get_precision (result), shift));
    }
  return result;
}

/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
   logical shift otherwise.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::rshift (const T1 &x, const T2 &y, signop sgn)
{
  if (sgn == UNSIGNED)
    return lrshift (x, y);
  else
    return arshift (x, y);
}
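
/* Example (illustrative): with an 8-bit value whose top bit is set,

     wi::lrshift (wi::uhwi (0x80, 8), 1)   == 0x40
     wi::arshift (wi::shwi (-128, 8), 1)   == -64 (bit pattern 0xc0)

   the logical shift fills vacated bits with zeros, the arithmetic
   shift with copies of the sign bit.  */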

/* Return the result of rotating the low WIDTH bits of X left by Y
   bits and zero-extending the result.  Use a full-width rotate if
   WIDTH is zero.  */
template <typename T1, typename T2>
WI_UNARY_RESULT (T1)
wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
{
  unsigned int precision = get_binary_precision (x, x);
  if (width == 0)
    width = precision;
  WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
  WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
  WI_UNARY_RESULT (T1) right = wi::lrshift (x, wi::sub (width, ymod));
  if (width != precision)
    return wi::zext (left, width) | wi::zext (right, width);
  return left | right;
}

/* Return the result of rotating the low WIDTH bits of X right by Y
   bits and zero-extending the result.  Use a full-width rotate if
   WIDTH is zero.  */
template <typename T1, typename T2>
WI_UNARY_RESULT (T1)
wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
{
  unsigned int precision = get_binary_precision (x, x);
  if (width == 0)
    width = precision;
  WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
  WI_UNARY_RESULT (T1) right = wi::lrshift (x, ymod);
  WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
  if (width != precision)
    return wi::zext (left, width) | wi::zext (right, width);
  return left | right;
}
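
/* Example (illustrative): rotating the low 8 bits of 0x81,

     wi::lrotate (wi::uhwi (0x81, 8), 1, 8)   == 0x03
     wi::rrotate (wi::uhwi (0x81, 8), 1, 8)   == 0xc0

   bits shifted out of one end reappear at the other.  */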

/* Return 0 if the number of 1s in X is even and 1 if the number of 1s
   is odd.  */
inline int
wi::parity (const wide_int_ref &x)
{
  return popcount (x) & 1;
}

/* Extract WIDTH bits from X, starting at BITPOS.  */
template <typename T>
inline unsigned HOST_WIDE_INT
wi::extract_uhwi (const T &x, unsigned int bitpos, unsigned int width)
{
  unsigned precision = get_precision (x);
  if (precision < bitpos + width)
    precision = bitpos + width;
  WIDE_INT_REF_FOR (T) xi (x, precision);

  /* Handle this rare case after the above, so that we assert about
     bogus BITPOS values.  */
  if (width == 0)
    return 0;

  unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT;
  unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT;
  unsigned HOST_WIDE_INT res = xi.elt (start);
  res >>= shift;
  if (shift + width > HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT upper = xi.elt (start + 1);
      res |= upper << (-shift % HOST_BITS_PER_WIDE_INT);
    }
  return zext_hwi (res, width);
}
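
/* Example (illustrative): extracting the high nibble of an 8-bit
   value,

     wi::extract_uhwi (wi::uhwi (0xab, 8), 4, 4)   == 0xa  */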

/* Return the minimum precision needed to store X with sign SGN.  */
template <typename T>
inline unsigned int
wi::min_precision (const T &x, signop sgn)
{
  if (sgn == SIGNED)
    return get_precision (x) - clrsb (x);
  else
    return get_precision (x) - clz (x);
}
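
/* Example (illustrative): the value 3 needs two bits as an unsigned
   number (binary 11) but three as a signed one (binary 011, keeping
   a sign bit):

     wi::min_precision (wi::uhwi (3, 32), UNSIGNED)   == 2
     wi::min_precision (wi::shwi (3, 32), SIGNED)     == 3  */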

#define SIGNED_BINARY_PREDICATE(OP, F)                         \
  template <typename T1, typename T2>                          \
    inline WI_SIGNED_BINARY_PREDICATE_RESULT (T1, T2)          \
    OP (const T1 &x, const T2 &y)                              \
    {                                                          \
      return wi::F (x, y);                                     \
    }

SIGNED_BINARY_PREDICATE (operator <, lts_p)
SIGNED_BINARY_PREDICATE (operator <=, les_p)
SIGNED_BINARY_PREDICATE (operator >, gts_p)
SIGNED_BINARY_PREDICATE (operator >=, ges_p)

#undef SIGNED_BINARY_PREDICATE

#define UNARY_OPERATOR(OP, F)                                  \
  template<typename T>                                         \
    WI_UNARY_RESULT (generic_wide_int<T>)                      \
    OP (const generic_wide_int<T> &x)                          \
    {                                                          \
      return wi::F (x);                                        \
    }

#define BINARY_PREDICATE(OP, F)                                \
  template<typename T1, typename T2>                           \
    WI_BINARY_PREDICATE_RESULT (T1, T2)                        \
    OP (const T1 &x, const T2 &y)                              \
    {                                                          \
      return wi::F (x, y);                                     \
    }

#define BINARY_OPERATOR(OP, F)                                 \
  template<typename T1, typename T2>                           \
    WI_BINARY_OPERATOR_RESULT (T1, T2)                         \
    OP (const T1 &x, const T2 &y)                              \
    {                                                          \
      return wi::F (x, y);                                     \
    }

#define SHIFT_OPERATOR(OP, F)                                  \
  template<typename T1, typename T2>                           \
    WI_BINARY_OPERATOR_RESULT (T1, T1)                         \
    OP (const T1 &x, const T2 &y)                              \
    {                                                          \
      return wi::F (x, y);                                     \
    }

UNARY_OPERATOR (operator ~, bit_not)
UNARY_OPERATOR (operator -, neg)
BINARY_PREDICATE (operator ==, eq_p)
BINARY_PREDICATE (operator !=, ne_p)
BINARY_OPERATOR (operator &, bit_and)
BINARY_OPERATOR (operator |, bit_or)
BINARY_OPERATOR (operator ^, bit_xor)
BINARY_OPERATOR (operator +, add)
BINARY_OPERATOR (operator -, sub)
BINARY_OPERATOR (operator *, mul)
SHIFT_OPERATOR (operator <<, lshift)

#undef UNARY_OPERATOR
#undef BINARY_PREDICATE
#undef BINARY_OPERATOR
#undef SHIFT_OPERATOR

template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator >> (const T1 &x, const T2 &y)
{
  return wi::arshift (x, y);
}

template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator / (const T1 &x, const T2 &y)
{
  return wi::sdiv_trunc (x, y);
}

template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator % (const T1 &x, const T2 &y)
{
  return wi::smod_trunc (x, y);
}

/* No-op hooks for the garbage collector and precompiled-header
   machinery: wide-int values contain no GC references, so there is
   nothing to mark or walk.  */
template<typename T>
void
gt_ggc_mx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *, void (*) (void *, void *), void *)
{
}

template<int N>
void
gt_ggc_mx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *, void (*) (void *, void *), void *)
{
}

namespace wi
{
  /* Used for overloaded functions in which the only other acceptable
     scalar type is a pointer.  It stops a plain 0 from being treated
     as a null pointer.  */
  struct never_used1 {};
  struct never_used2 {};

  wide_int min_value (unsigned int, signop);
  wide_int min_value (never_used1 *);
  wide_int min_value (never_used2 *);
  wide_int max_value (unsigned int, signop);
  wide_int max_value (never_used1 *);
  wide_int max_value (never_used2 *);

  /* FIXME: this is target dependent, so should be elsewhere.
     It also seems to assume that CHAR_BIT == BITS_PER_UNIT.  */
  wide_int from_buffer (const unsigned char *, unsigned int);

#ifndef GENERATOR_FILE
  void to_mpz (const wide_int_ref &, mpz_t, signop);
#endif

  wide_int mask (unsigned int, bool, unsigned int);
  wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
  wide_int set_bit_in_zero (unsigned int, unsigned int);
  wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
                   unsigned int);

  wide_int round_down_for_mask (const wide_int &, const wide_int &);
  wide_int round_up_for_mask (const wide_int &, const wide_int &);

  template <typename T>
  T mask (unsigned int, bool);

  template <typename T>
  T shifted_mask (unsigned int, unsigned int, bool);

  template <typename T>
  T set_bit_in_zero (unsigned int);

  unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
  unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
                             bool, unsigned int);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                           unsigned int, unsigned int, bool);
}

/* Return a PRECISION-bit integer in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
inline wide_int
wi::mask (unsigned int width, bool negate_p, unsigned int precision)
{
  wide_int result = wide_int::create (precision);
  result.set_len (mask (result.write_val (), width, negate_p, precision));
  return result;
}

/* Return a PRECISION-bit integer in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear,
   or the inverse if NEGATE_P.  */
inline wide_int
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
                  unsigned int precision)
{
  wide_int result = wide_int::create (precision);
  result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
                                precision));
  return result;
}

/* Return a PRECISION-bit integer in which bit BIT is set and all the
   others are clear.  */
inline wide_int
wi::set_bit_in_zero (unsigned int bit, unsigned int precision)
{
  return shifted_mask (bit, 1, false, precision);
}
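
/* Examples (illustrative), all at 8-bit precision:

     wi::mask (4, false, 8)              == 0x0f
     wi::mask (4, true, 8)               == 0xf0
     wi::shifted_mask (2, 3, false, 8)   == 0x1c
     wi::set_bit_in_zero (3, 8)          == 0x08  */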

/* Return an integer of type T in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
template <typename T>
inline T
wi::mask (unsigned int width, bool negate_p)
{
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  result.set_len (mask (result.write_val (), width, negate_p,
                        wi::int_traits <T>::precision));
  return result;
}

/* Return an integer of type T in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear, or the
   inverse if NEGATE_P.  */
template <typename T>
inline T
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  result.set_len (shifted_mask (result.write_val (), start, width,
                                negate_p,
                                wi::int_traits <T>::precision));
  return result;
}

/* Return an integer of type T in which bit BIT is set and all the
   others are clear.  */
template <typename T>
inline T
wi::set_bit_in_zero (unsigned int bit)
{
  return shifted_mask <T> (bit, 1, false);
}

/* Accumulate a set of overflows into OVERFLOW.  */
static inline void
wi::accumulate_overflow (wi::overflow_type &overflow,
                         wi::overflow_type suboverflow)
{
  if (!suboverflow)
    return;
  if (!overflow)
    overflow = suboverflow;
  else if (overflow != suboverflow)
    overflow = wi::OVF_UNKNOWN;
}
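
/* Example (an illustrative sketch; A, B and C are hypothetical
   wide_ints of equal precision):

     wi::overflow_type ovf = wi::OVF_NONE, sub_ovf;
     wide_int sum = wi::add (a, b, SIGNED, &sub_ovf);
     wi::accumulate_overflow (ovf, sub_ovf);
     sum = wi::add (sum, c, SIGNED, &sub_ovf);
     wi::accumulate_overflow (ovf, sub_ovf);

   OVF remains OVF_NONE only if neither addition overflowed.  */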

#endif /* WIDE_INT_H */