
ref_Tensor.ipynb

  1. {
  2. "cells": [
  3. {
  4. "cell_type": "markdown",
  5. "metadata": {},
  6. "source": [
  7. "# Chapter 3  PyTorch Basics: Tensor and Autograd\n",
  8. "\n",
  9. "## 3.1 Tensor\n",
  10. "\n",
  11. "A tensor may be a familiar term: it appears not only in PyTorch but is also a core data structure in Theano, TensorFlow,\n",
  12. "Torch and MXNet. There is no shortage of deep analysis of what a tensor really is, but from an engineering point of view it can simply be regarded as an array that supports efficient scientific computation. It can be a single number (a scalar), a one-dimensional array (a vector), a two-dimensional array (a matrix) or a higher-dimensional array. Tensors are similar to NumPy's ndarrays, but PyTorch tensors additionally support GPU acceleration.\n",
  13. "\n",
  14. "This section covers tensor usage systematically, aiming to be broad rather than exhaustive: it does not cover every function. For more functions and their usage, append `?` to a function name in IPython/Notebook to view its help, or consult the official PyTorch documentation[^1].\n",
  15. "\n",
  16. "[^1]: http://docs.pytorch.org"
  17. ]
  18. },
  19. {
  20. "cell_type": "code",
  21. "execution_count": 1,
  22. "metadata": {},
  23. "outputs": [
  24. {
  25. "data": {
  26. "text/plain": [
  27. "'0.4.1'"
  28. ]
  29. },
  30. "execution_count": 1,
  31. "metadata": {},
  32. "output_type": "execute_result"
  33. }
  34. ],
  35. "source": [
  36. "# Let's begin\n",
  37. "from __future__ import print_function\n",
  38. "import torch as t\n",
  39. "t.__version__"
  40. ]
  41. },
  42. {
  43. "cell_type": "markdown",
  44. "metadata": {},
  45. "source": [
  46. "### 3.1.1 Basic operations\n",
  47. "\n",
  48. "Readers who already know NumPy will find this section very familiar, because the tensor interface is deliberately designed to resemble NumPy's. Not knowing NumPy is also fine: this section does not assume prior NumPy experience.\n",
  49. "\n",
  50. "From the interface point of view, tensor operations fall into two groups:\n",
  51. "\n",
  52. "1. `torch.function`, e.g. `torch.save`.\n",
  53. "2. `tensor.function`, e.g. `tensor.view`.\n",
  54. "\n",
  55. "For convenience, most tensor operations support both interfaces, and this book does not distinguish between them: for example `torch.sum(a, b)` and `a.sum(b)` are equivalent.\n",
  56. "\n",
  57. "From the storage point of view, tensor operations also fall into two groups:\n",
  58. "\n",
  59. "1. Operations that do not modify the tensor itself, e.g. `a.add(b)`: the result of the addition is returned as a new tensor.\n",
  60. "2. Operations that do modify the tensor itself, e.g. `a.add_(b)`: the result is stored in `a`, i.e. `a` is modified.\n",
  61. "\n",
  62. "Every function whose name ends with `_` works in place, i.e. it modifies the caller's own data; keep the two kinds apart in practice (a short demonstration follows the creation examples below).\n",
  63. "\n",
  64. "#### Creating tensors\n",
  65. "\n",
  66. "There are many ways to create a new tensor in PyTorch; the common ones are listed in table 3-1.\n",
  67. "\n",
  68. "Table 3-1: common ways to create a tensor\n",
  69. "\n",
  70. "|Function|Purpose|\n",
  71. "|:---:|:---:|\n",
  72. "|Tensor(\*sizes)|basic constructor|\n",
  73. "|ones(\*sizes)|all-ones tensor|\n",
  74. "|zeros(\*sizes)|all-zeros tensor|\n",
  75. "|eye(\*sizes)|ones on the diagonal, zeros elsewhere|\n",
  76. "|arange(s,e,step)|from s to e with step size step|\n",
  77. "|linspace(s,e,steps)|steps points evenly spaced from s to e|\n",
  78. "|rand/randn(\*sizes)|uniform/standard-normal distribution|\n",
  79. "|normal(mean,std)/uniform(from,to)|normal/uniform distribution|\n",
  80. "|randperm(m)|random permutation|\n",
  81. "\n",
  82. "Among them, the `Tensor` constructor is the most versatile: it can take a list and build a tensor from its data, build a tensor of a given shape, or take another tensor. A few examples follow."
  83. ]
  84. },
  85. {
  86. "cell_type": "code",
  87. "execution_count": 2,
  88. "metadata": {},
  89. "outputs": [
  90. {
  91. "data": {
  92. "text/plain": [
  93. "tensor([[0.0000, 0.0000, 0.0000],\n",
  94. " [0.0000, 0.0000, 0.0000]])"
  95. ]
  96. },
  97. "execution_count": 2,
  98. "metadata": {},
  99. "output_type": "execute_result"
  100. }
  101. ],
  102. "source": [
  103. "# specify the tensor's shape\n",
  104. "a = t.Tensor(2, 3)\n",
  105. "a # the values depend on whatever happened to be in that memory"
  106. ]
  107. },
  108. {
  109. "cell_type": "code",
  110. "execution_count": 6,
  111. "metadata": {},
  112. "outputs": [
  113. {
  114. "data": {
  115. "text/plain": [
  116. "tensor([[1., 2., 3.],\n",
  117. " [4., 5., 6.]])"
  118. ]
  119. },
  120. "execution_count": 6,
  121. "metadata": {},
  122. "output_type": "execute_result"
  123. }
  124. ],
  125. "source": [
  126. "# create a tensor from the data in a list\n",
  127. "b = t.Tensor([[1,2,3],[4,5,6]])\n",
  128. "b"
  129. ]
  130. },
  131. {
  132. "cell_type": "code",
  133. "execution_count": 7,
  134. "metadata": {},
  135. "outputs": [
  136. {
  137. "data": {
  138. "text/plain": [
  139. "[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]"
  140. ]
  141. },
  142. "execution_count": 7,
  143. "metadata": {},
  144. "output_type": "execute_result"
  145. }
  146. ],
  147. "source": [
  148. "b.tolist() # convert the tensor to a list"
  149. ]
  150. },
  151. {
  152. "cell_type": "markdown",
  153. "metadata": {},
  154. "source": [
  155. "`tensor.size()` returns a `torch.Size` object. It is a subclass of tuple, but its usage differs slightly from a plain tuple."
  156. ]
  157. },
  158. {
  159. "cell_type": "code",
  160. "execution_count": 8,
  161. "metadata": {
  162. "scrolled": true
  163. },
  164. "outputs": [
  165. {
  166. "data": {
  167. "text/plain": [
  168. "torch.Size([2, 3])"
  169. ]
  170. },
  171. "execution_count": 8,
  172. "metadata": {},
  173. "output_type": "execute_result"
  174. }
  175. ],
  176. "source": [
  177. "b_size = b.size()\n",
  178. "b_size"
  179. ]
  180. },
  181. {
  182. "cell_type": "code",
  183. "execution_count": 9,
  184. "metadata": {},
  185. "outputs": [
  186. {
  187. "data": {
  188. "text/plain": [
  189. "6"
  190. ]
  191. },
  192. "execution_count": 9,
  193. "metadata": {},
  194. "output_type": "execute_result"
  195. }
  196. ],
  197. "source": [
  198. "b.numel() # total number of elements in b, 2*3; equivalent to b.nelement()"
  199. ]
  200. },
  201. {
  202. "cell_type": "code",
  203. "execution_count": 10,
  204. "metadata": {
  205. "scrolled": true
  206. },
  207. "outputs": [
  208. {
  209. "data": {
  210. "text/plain": [
  211. "(tensor([[ 0.0000, 0.0000, 0.0000],\n",
  212. " [ 0.0000, -0.0000, 0.0000]]), tensor([2., 3.]))"
  213. ]
  214. },
  215. "execution_count": 10,
  216. "metadata": {},
  217. "output_type": "execute_result"
  218. }
  219. ],
  220. "source": [
  221. "# create a tensor with the same shape as b\n",
  222. "c = t.Tensor(b_size)\n",
  223. "# create a tensor whose elements are 2 and 3\n",
  224. "d = t.Tensor((2, 3))\n",
  225. "c, d"
  226. ]
  227. },
  228. {
  229. "cell_type": "markdown",
  230. "metadata": {},
  231. "source": [
  232. "Besides `tensor.size()`, a tensor's shape can also be inspected directly through `tensor.shape`; `tensor.shape` is equivalent to `tensor.size()`."
  233. ]
  234. },
  235. {
  236. "cell_type": "code",
  237. "execution_count": 11,
  238. "metadata": {},
  239. "outputs": [
  240. {
  241. "data": {
  242. "text/plain": [
  243. "torch.Size([2, 3])"
  244. ]
  245. },
  246. "execution_count": 11,
  247. "metadata": {},
  248. "output_type": "execute_result"
  249. }
  250. ],
  251. "source": [
  252. "c.shape"
  253. ]
  254. },
  255. {
  256. "cell_type": "code",
  257. "execution_count": 12,
  258. "metadata": {},
  259. "outputs": [],
  260. "source": [
  261. "c.shape??"
  262. ]
  263. },
  264. {
  265. "cell_type": "markdown",
  266. "metadata": {},
  267. "source": [
  268. "Note that when a tensor is created with `t.Tensor(*sizes)`, memory is not allocated immediately; the system only checks whether enough memory remains and allocates it when the tensor is actually used, whereas the other constructors allocate memory as soon as the tensor is created. Examples of the other common constructors follow."
  269. ]
  270. },
  271. {
  272. "cell_type": "code",
  273. "execution_count": 13,
  274. "metadata": {
  275. "scrolled": true
  276. },
  277. "outputs": [
  278. {
  279. "data": {
  280. "text/plain": [
  281. "tensor([[1., 1., 1.],\n",
  282. " [1., 1., 1.]])"
  283. ]
  284. },
  285. "execution_count": 13,
  286. "metadata": {},
  287. "output_type": "execute_result"
  288. }
  289. ],
  290. "source": [
  291. "t.ones(2, 3)"
  292. ]
  293. },
  294. {
  295. "cell_type": "code",
  296. "execution_count": 14,
  297. "metadata": {},
  298. "outputs": [
  299. {
  300. "data": {
  301. "text/plain": [
  302. "tensor([[0., 0., 0.],\n",
  303. " [0., 0., 0.]])"
  304. ]
  305. },
  306. "execution_count": 14,
  307. "metadata": {},
  308. "output_type": "execute_result"
  309. }
  310. ],
  311. "source": [
  312. "t.zeros(2, 3)"
  313. ]
  314. },
  315. {
  316. "cell_type": "code",
  317. "execution_count": 15,
  318. "metadata": {},
  319. "outputs": [
  320. {
  321. "data": {
  322. "text/plain": [
  323. "tensor([1, 3, 5])"
  324. ]
  325. },
  326. "execution_count": 15,
  327. "metadata": {},
  328. "output_type": "execute_result"
  329. }
  330. ],
  331. "source": [
  332. "t.arange(1, 6, 2)"
  333. ]
  334. },
  335. {
  336. "cell_type": "code",
  337. "execution_count": 16,
  338. "metadata": {},
  339. "outputs": [
  340. {
  341. "data": {
  342. "text/plain": [
  343. "tensor([ 1.0000, 5.5000, 10.0000])"
  344. ]
  345. },
  346. "execution_count": 16,
  347. "metadata": {},
  348. "output_type": "execute_result"
  349. }
  350. ],
  351. "source": [
  352. "t.linspace(1, 10, 3)"
  353. ]
  354. },
  355. {
  356. "cell_type": "code",
  357. "execution_count": 17,
  358. "metadata": {},
  359. "outputs": [
  360. {
  361. "data": {
  362. "text/plain": [
  363. "tensor([[ 0.4388, 0.9361, 0.8411],\n",
  364. " [-1.0667, -0.5187, 0.5520]])"
  365. ]
  366. },
  367. "execution_count": 17,
  368. "metadata": {},
  369. "output_type": "execute_result"
  370. }
  371. ],
  372. "source": [
  373. "t.randn(2, 3)"
  374. ]
  375. },
  376. {
  377. "cell_type": "code",
  378. "execution_count": 18,
  379. "metadata": {
  380. "scrolled": true
  381. },
  382. "outputs": [
  383. {
  384. "data": {
  385. "text/plain": [
  386. "tensor([2, 0, 4, 1, 3])"
  387. ]
  388. },
  389. "execution_count": 18,
  390. "metadata": {},
  391. "output_type": "execute_result"
  392. }
  393. ],
  394. "source": [
  395. "t.randperm(5) # a random permutation of length 5"
  396. ]
  397. },
  398. {
  399. "cell_type": "code",
  400. "execution_count": 19,
  401. "metadata": {
  402. "scrolled": true
  403. },
  404. "outputs": [
  405. {
  406. "data": {
  407. "text/plain": [
  408. "tensor([[1., 0., 0.],\n",
  409. " [0., 1., 0.]])"
  410. ]
  411. },
  412. "execution_count": 19,
  413. "metadata": {},
  414. "output_type": "execute_result"
  415. }
  416. ],
  417. "source": [
  418. "t.eye(2, 3) # ones on the diagonal; the number of rows and columns need not match"
  419. ]
  420. },
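{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a small illustrative sketch (not part of the original notebook). It demonstrates the two points made earlier: the `torch.function`/`tensor.function` interfaces and the in-place `_` convention. The names `x`, `y` and `z` are throwaway examples."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x = t.ones(2, 3)\n",
"y = t.ones(2, 3)\n",
"z = x.add(y)  # same as t.add(x, y): returns a new tensor, x is unchanged\n",
"x.add_(y)     # in-place version: x itself is modified\n",
"x, z"
]
},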
  421. {
  422. "cell_type": "markdown",
  423. "metadata": {},
  424. "source": [
  425. "#### Common tensor operations"
  426. ]
  427. },
  428. {
  429. "cell_type": "markdown",
  430. "metadata": {},
  431. "source": [
  432. "The `tensor.view` method reshapes a tensor, provided the total number of elements stays the same. `view` does not modify the underlying data: the returned tensor shares memory with the source tensor, so changing one changes the other. In practice one often needs to add or remove a dimension of size 1, which is what `squeeze` and `unsqueeze` are for."
  433. ]
  434. },
  435. {
  436. "cell_type": "code",
  437. "execution_count": 20,
  438. "metadata": {
  439. "scrolled": true
  440. },
  441. "outputs": [
  442. {
  443. "data": {
  444. "text/plain": [
  445. "tensor([[0, 1, 2],\n",
  446. " [3, 4, 5]])"
  447. ]
  448. },
  449. "execution_count": 20,
  450. "metadata": {},
  451. "output_type": "execute_result"
  452. }
  453. ],
  454. "source": [
  455. "a = t.arange(0, 6)\n",
  456. "a.view(2, 3)"
  457. ]
  458. },
  459. {
  460. "cell_type": "code",
  461. "execution_count": 21,
  462. "metadata": {
  463. "scrolled": true
  464. },
  465. "outputs": [
  466. {
  467. "data": {
  468. "text/plain": [
  469. "tensor([[0, 1, 2],\n",
  470. " [3, 4, 5]])"
  471. ]
  472. },
  473. "execution_count": 21,
  474. "metadata": {},
  475. "output_type": "execute_result"
  476. }
  477. ],
  478. "source": [
  479. "b = a.view(-1, 3) # when a dimension is given as -1, its size is computed automatically\n",
  480. "b"
  481. ]
  482. },
  483. {
  484. "cell_type": "code",
  485. "execution_count": 22,
  486. "metadata": {},
  487. "outputs": [
  488. {
  489. "data": {
  490. "text/plain": [
  491. "tensor([[[0, 1, 2]],\n",
  492. "\n",
  493. " [[3, 4, 5]]])"
  494. ]
  495. },
  496. "execution_count": 22,
  497. "metadata": {},
  498. "output_type": "execute_result"
  499. }
  500. ],
  501. "source": [
  502. "b.unsqueeze(1) # note the shape: a dimension of size 1 is inserted at dim 1 (0-based)"
  503. ]
  504. },
  505. {
  506. "cell_type": "code",
  507. "execution_count": 23,
  508. "metadata": {},
  509. "outputs": [
  510. {
  511. "data": {
  512. "text/plain": [
  513. "tensor([[[0, 1, 2]],\n",
  514. "\n",
  515. " [[3, 4, 5]]])"
  516. ]
  517. },
  518. "execution_count": 23,
  519. "metadata": {},
  520. "output_type": "execute_result"
  521. }
  522. ],
  523. "source": [
  524. "b.unsqueeze(-2) # -2 means the second-to-last dimension"
  525. ]
  526. },
  527. {
  528. "cell_type": "code",
  529. "execution_count": 24,
  530. "metadata": {
  531. "scrolled": true
  532. },
  533. "outputs": [
  534. {
  535. "data": {
  536. "text/plain": [
  537. "tensor([[[[0, 1, 2],\n",
  538. " [3, 4, 5]]]])"
  539. ]
  540. },
  541. "execution_count": 24,
  542. "metadata": {},
  543. "output_type": "execute_result"
  544. }
  545. ],
  546. "source": [
  547. "c = b.view(1, 1, 1, 2, 3)\n",
  548. "c.squeeze(0) # remove the size-1 dimension at dim 0"
  549. ]
  550. },
  551. {
  552. "cell_type": "code",
  553. "execution_count": 25,
  554. "metadata": {},
  555. "outputs": [
  556. {
  557. "data": {
  558. "text/plain": [
  559. "tensor([[0, 1, 2],\n",
  560. " [3, 4, 5]])"
  561. ]
  562. },
  563. "execution_count": 25,
  564. "metadata": {},
  565. "output_type": "execute_result"
  566. }
  567. ],
  568. "source": [
  569. "c.squeeze() # remove all dimensions of size 1"
  570. ]
  571. },
  572. {
  573. "cell_type": "code",
  574. "execution_count": 26,
  575. "metadata": {},
  576. "outputs": [
  577. {
  578. "data": {
  579. "text/plain": [
  580. "tensor([[ 0, 100, 2],\n",
  581. " [ 3, 4, 5]])"
  582. ]
  583. },
  584. "execution_count": 26,
  585. "metadata": {},
  586. "output_type": "execute_result"
  587. }
  588. ],
  589. "source": [
  590. "a[1] = 100\n",
  591. "b # b is a view of a, so modifying a modifies b as well"
  592. ]
  593. },
  594. {
  595. "cell_type": "markdown",
  596. "metadata": {},
  597. "source": [
  598. "`resize` is another way to adjust a tensor's size, but unlike `view` it can change the total number of elements. If the new size is larger than the old one, new memory is allocated automatically; if it is smaller, the previous data is still kept. An example follows."
  599. ]
  600. },
  601. {
  602. "cell_type": "code",
  603. "execution_count": 24,
  604. "metadata": {},
  605. "outputs": [
  606. {
  607. "data": {
  608. "text/plain": [
  609. "\n",
  610. " 0 100 2\n",
  611. "[torch.FloatTensor of size 1x3]"
  612. ]
  613. },
  614. "execution_count": 24,
  615. "metadata": {},
  616. "output_type": "execute_result"
  617. }
  618. ],
  619. "source": [
  620. "b.resize_(1, 3)\n",
  621. "b"
  622. ]
  623. },
  624. {
  625. "cell_type": "code",
  626. "execution_count": 25,
  627. "metadata": {
  628. "scrolled": true
  629. },
  630. "outputs": [
  631. {
  632. "data": {
  633. "text/plain": [
  634. "\n",
  635. " 0.0000e+00 1.0000e+02 2.0000e+00\n",
  636. " 3.0000e+00 4.0000e+00 5.0000e+00\n",
  637. " 4.1417e+36 4.5731e-41 6.7262e-44\n",
  638. "[torch.FloatTensor of size 3x3]"
  639. ]
  640. },
  641. "execution_count": 25,
  642. "metadata": {},
  643. "output_type": "execute_result"
  644. }
  645. ],
  646. "source": [
  647. "b.resize_(3, 3) # the old data is still kept; the extra space is newly allocated\n",
  648. "b"
  649. ]
  650. },
  651. {
  652. "cell_type": "markdown",
  653. "metadata": {},
  654. "source": [
  655. "#### Indexing\n",
  656. "\n",
  657. "Tensors support indexing operations similar to numpy.ndarray, with similar syntax. The examples below cover the common ones. Unless stated otherwise, the result of an indexing operation shares memory with the original tensor: modifying one modifies the other (a short demonstration follows the basic indexing examples below)."
  658. ]
  659. },
  660. {
  661. "cell_type": "code",
  662. "execution_count": 26,
  663. "metadata": {},
  664. "outputs": [
  665. {
  666. "data": {
  667. "text/plain": [
  668. "\n",
  669. " 0.2355 0.8276 0.6279 -2.3826\n",
  670. " 0.3533 1.3359 0.1627 1.7314\n",
  671. " 0.8121 0.3059 2.4352 1.4577\n",
  672. "[torch.FloatTensor of size 3x4]"
  673. ]
  674. },
  675. "execution_count": 26,
  676. "metadata": {},
  677. "output_type": "execute_result"
  678. }
  679. ],
  680. "source": [
  681. "a = t.randn(3, 4)\n",
  682. "a"
  683. ]
  684. },
  685. {
  686. "cell_type": "code",
  687. "execution_count": 27,
  688. "metadata": {},
  689. "outputs": [
  690. {
  691. "data": {
  692. "text/plain": [
  693. "\n",
  694. " 0.2355\n",
  695. " 0.8276\n",
  696. " 0.6279\n",
  697. "-2.3826\n",
  698. "[torch.FloatTensor of size 4]"
  699. ]
  700. },
  701. "execution_count": 27,
  702. "metadata": {},
  703. "output_type": "execute_result"
  704. }
  705. ],
  706. "source": [
  707. "a[0] # row 0 (indices start at 0)"
  708. ]
  709. },
  710. {
  711. "cell_type": "code",
  712. "execution_count": 28,
  713. "metadata": {},
  714. "outputs": [
  715. {
  716. "data": {
  717. "text/plain": [
  718. "\n",
  719. " 0.2355\n",
  720. " 0.3533\n",
  721. " 0.8121\n",
  722. "[torch.FloatTensor of size 3]"
  723. ]
  724. },
  725. "execution_count": 28,
  726. "metadata": {},
  727. "output_type": "execute_result"
  728. }
  729. ],
  730. "source": [
  731. "a[:, 0] # column 0"
  732. ]
  733. },
  734. {
  735. "cell_type": "code",
  736. "execution_count": 29,
  737. "metadata": {},
  738. "outputs": [
  739. {
  740. "data": {
  741. "text/plain": [
  742. "0.6279084086418152"
  743. ]
  744. },
  745. "execution_count": 29,
  746. "metadata": {},
  747. "output_type": "execute_result"
  748. }
  749. ],
  750. "source": [
  751. "a[0][2] # element at row 0, column 2; equivalent to a[0, 2]"
  752. ]
  753. },
  754. {
  755. "cell_type": "code",
  756. "execution_count": 30,
  757. "metadata": {},
  758. "outputs": [
  759. {
  760. "data": {
  761. "text/plain": [
  762. "-2.3825833797454834"
  763. ]
  764. },
  765. "execution_count": 30,
  766. "metadata": {},
  767. "output_type": "execute_result"
  768. }
  769. ],
  770. "source": [
  771. "a[0, -1] # last element of row 0"
  772. ]
  773. },
  774. {
  775. "cell_type": "code",
  776. "execution_count": 31,
  777. "metadata": {
  778. "scrolled": true
  779. },
  780. "outputs": [
  781. {
  782. "data": {
  783. "text/plain": [
  784. "\n",
  785. " 0.2355 0.8276 0.6279 -2.3826\n",
  786. " 0.3533 1.3359 0.1627 1.7314\n",
  787. "[torch.FloatTensor of size 2x4]"
  788. ]
  789. },
  790. "execution_count": 31,
  791. "metadata": {},
  792. "output_type": "execute_result"
  793. }
  794. ],
  795. "source": [
  796. "a[:2] # first two rows"
  797. ]
  798. },
  799. {
  800. "cell_type": "code",
  801. "execution_count": 32,
  802. "metadata": {},
  803. "outputs": [
  804. {
  805. "data": {
  806. "text/plain": [
  807. "\n",
  808. " 0.2355 0.8276\n",
  809. " 0.3533 1.3359\n",
  810. "[torch.FloatTensor of size 2x2]"
  811. ]
  812. },
  813. "execution_count": 32,
  814. "metadata": {},
  815. "output_type": "execute_result"
  816. }
  817. ],
  818. "source": [
  819. "a[:2, 0:2] # first two rows, columns 0 and 1"
  820. ]
  821. },
  822. {
  823. "cell_type": "code",
  824. "execution_count": 33,
  825. "metadata": {},
  826. "outputs": [
  827. {
  828. "name": "stdout",
  829. "output_type": "stream",
  830. "text": [
  831. "\n",
  832. " 0.2355 0.8276\n",
  833. "[torch.FloatTensor of size 1x2]\n",
  834. "\n",
  835. "\n",
  836. " 0.2355\n",
  837. " 0.8276\n",
  838. "[torch.FloatTensor of size 2]\n",
  839. "\n"
  840. ]
  841. }
  842. ],
  843. "source": [
  844. "print(a[0:1, :2]) # row 0, first two columns\n",
  845. "print(a[0, :2]) # note the difference between the two: the shapes differ"
  846. ]
  847. },
  848. {
  849. "cell_type": "code",
  850. "execution_count": 34,
  851. "metadata": {},
  852. "outputs": [
  853. {
  854. "data": {
  855. "text/plain": [
  856. "\n",
  857. " 0 0 0 0\n",
  858. " 0 1 0 1\n",
  859. " 0 0 1 1\n",
  860. "[torch.ByteTensor of size 3x4]"
  861. ]
  862. },
  863. "execution_count": 34,
  864. "metadata": {},
  865. "output_type": "execute_result"
  866. }
  867. ],
  868. "source": [
  869. "a > 1 # returns a ByteTensor"
  870. ]
  871. },
  872. {
  873. "cell_type": "code",
  874. "execution_count": 35,
  875. "metadata": {},
  876. "outputs": [
  877. {
  878. "data": {
  879. "text/plain": [
  880. "\n",
  881. " 1.3359\n",
  882. " 1.7314\n",
  883. " 2.4352\n",
  884. " 1.4577\n",
  885. "[torch.FloatTensor of size 4]"
  886. ]
  887. },
  888. "execution_count": 35,
  889. "metadata": {},
  890. "output_type": "execute_result"
  891. }
  892. ],
  893. "source": [
  894. "a[a>1] # equivalent to a.masked_select(a>1)\n",
  895. "# the selected result does not share memory with the original tensor"
  896. ]
  897. },
  898. {
  899. "cell_type": "code",
  900. "execution_count": 36,
  901. "metadata": {
  902. "scrolled": true
  903. },
  904. "outputs": [
  905. {
  906. "data": {
  907. "text/plain": [
  908. "\n",
  909. " 0.2355 0.8276 0.6279 -2.3826\n",
  910. " 0.3533 1.3359 0.1627 1.7314\n",
  911. "[torch.FloatTensor of size 2x4]"
  912. ]
  913. },
  914. "execution_count": 36,
  915. "metadata": {},
  916. "output_type": "execute_result"
  917. }
  918. ],
  919. "source": [
  920. "a[t.LongTensor([0,1])] # rows 0 and 1"
  921. ]
  922. },
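{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch (not from the original notebook) of the memory-sharing behaviour stated above: ordinary indexing returns a view, so writing into the view also changes the original tensor. `m` and `row` are illustrative names."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"m = t.Tensor([[1, 2], [3, 4]])\n",
"row = m[0]    # ordinary indexing returns a view that shares memory with m\n",
"row[0] = 100\n",
"m             # m is modified as well"
]
},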
  923. {
  924. "cell_type": "markdown",
  925. "metadata": {},
  926. "source": [
  927. "Other commonly used selection functions are listed in table 3-2.\n",
  928. "\n",
  929. "Table 3-2: common selection functions\n",
  930. "\n",
  931. "Function|Purpose|\n",
  932. ":---:|:---:|\n",
  933. "index_select(input, dim, index)|select along the given dimension dim, e.g. certain rows or columns (see the sketch after the scatter_ example below)\n",
  934. "masked_select(input, mask)|as in the example above, a[a>1]; selection with a ByteTensor mask\n",
  935. "nonzero(input)|indices of the non-zero elements\n",
  936. "gather(input, dim, index)|pick data along dimension dim according to index; the output has the same size as index\n",
  937. "\n",
  938. "\n",
  939. "`gather` is a relatively complex operation. For a 2-D tensor, each output element is given by:\n",
  940. "\n",
  941. "```python\n",
  942. "out[i][j] = input[index[i][j]][j] # dim=0\n",
  943. "out[i][j] = input[i][index[i][j]] # dim=1\n",
  944. "```\n",
  945. "`gather` on a 3-D tensor works the same way. A few examples follow."
  946. ]
  947. },
  948. {
  949. "cell_type": "code",
  950. "execution_count": 37,
  951. "metadata": {},
  952. "outputs": [
  953. {
  954. "data": {
  955. "text/plain": [
  956. "\n",
  957. " 0 1 2 3\n",
  958. " 4 5 6 7\n",
  959. " 8 9 10 11\n",
  960. " 12 13 14 15\n",
  961. "[torch.FloatTensor of size 4x4]"
  962. ]
  963. },
  964. "execution_count": 37,
  965. "metadata": {},
  966. "output_type": "execute_result"
  967. }
  968. ],
  969. "source": [
  970. "a = t.arange(0, 16).view(4, 4)\n",
  971. "a"
  972. ]
  973. },
  974. {
  975. "cell_type": "code",
  976. "execution_count": 38,
  977. "metadata": {},
  978. "outputs": [
  979. {
  980. "data": {
  981. "text/plain": [
  982. "\n",
  983. " 0 5 10 15\n",
  984. "[torch.FloatTensor of size 1x4]"
  985. ]
  986. },
  987. "execution_count": 38,
  988. "metadata": {},
  989. "output_type": "execute_result"
  990. }
  991. ],
  992. "source": [
  993. "# pick the elements on the main diagonal\n",
  994. "index = t.LongTensor([[0,1,2,3]])\n",
  995. "a.gather(0, index)"
  996. ]
  997. },
  998. {
  999. "cell_type": "code",
  1000. "execution_count": 39,
  1001. "metadata": {},
  1002. "outputs": [
  1003. {
  1004. "data": {
  1005. "text/plain": [
  1006. "\n",
  1007. " 3\n",
  1008. " 6\n",
  1009. " 9\n",
  1010. " 12\n",
  1011. "[torch.FloatTensor of size 4x1]"
  1012. ]
  1013. },
  1014. "execution_count": 39,
  1015. "metadata": {},
  1016. "output_type": "execute_result"
  1017. }
  1018. ],
  1019. "source": [
  1020. "# pick the elements on the anti-diagonal\n",
  1021. "index = t.LongTensor([[3,2,1,0]]).t()\n",
  1022. "a.gather(1, index)"
  1023. ]
  1024. },
  1025. {
  1026. "cell_type": "code",
  1027. "execution_count": 40,
  1028. "metadata": {},
  1029. "outputs": [
  1030. {
  1031. "data": {
  1032. "text/plain": [
  1033. "\n",
  1034. " 12 9 6 3\n",
  1035. "[torch.FloatTensor of size 1x4]"
  1036. ]
  1037. },
  1038. "execution_count": 40,
  1039. "metadata": {},
  1040. "output_type": "execute_result"
  1041. }
  1042. ],
  1043. "source": [
  1044. "# pick the anti-diagonal elements again; note the difference from the cell above\n",
  1045. "index = t.LongTensor([[3,2,1,0]])\n",
  1046. "a.gather(0, index)"
  1047. ]
  1048. },
  1049. {
  1050. "cell_type": "code",
  1051. "execution_count": 41,
  1052. "metadata": {},
  1053. "outputs": [
  1054. {
  1055. "data": {
  1056. "text/plain": [
  1057. "\n",
  1058. " 0 3\n",
  1059. " 5 6\n",
  1060. " 10 9\n",
  1061. " 15 12\n",
  1062. "[torch.FloatTensor of size 4x2]"
  1063. ]
  1064. },
  1065. "execution_count": 41,
  1066. "metadata": {},
  1067. "output_type": "execute_result"
  1068. }
  1069. ],
  1070. "source": [
  1071. "# pick the elements on both diagonals\n",
  1072. "index = t.LongTensor([[0,1,2,3],[3,2,1,0]]).t()\n",
  1073. "b = a.gather(1, index)\n",
  1074. "b"
  1075. ]
  1076. },
  1077. {
  1078. "cell_type": "markdown",
  1079. "metadata": {},
  1080. "source": [
  1081. "The inverse of `gather` is `scatter_`: `gather` pulls data out of input according to index, while `scatter_` writes the data back to the indexed positions. Note that `scatter_` is an in-place operation and takes the source tensor as its third argument.\n",
  1082. "\n",
  1083. "```python\n",
  1084. "out = input.gather(dim, index)\n",
  1085. "# approximate inverse operation\n",
  1086. "new = t.zeros(input.size())\n",
  1087. "new.scatter_(dim, index, out)\n",
  1088. "```"
  1089. ]
  1090. },
  1091. {
  1092. "cell_type": "code",
  1093. "execution_count": 42,
  1094. "metadata": {
  1095. "scrolled": true
  1096. },
  1097. "outputs": [
  1098. {
  1099. "data": {
  1100. "text/plain": [
  1101. "\n",
  1102. " 0 0 0 3\n",
  1103. " 0 5 6 0\n",
  1104. " 0 9 10 0\n",
  1105. " 12 0 0 15\n",
  1106. "[torch.FloatTensor of size 4x4]"
  1107. ]
  1108. },
  1109. "execution_count": 42,
  1110. "metadata": {},
  1111. "output_type": "execute_result"
  1112. }
  1113. ],
  1114. "source": [
  1115. "# put the elements of the two diagonals back at the specified positions\n",
  1116. "c = t.zeros(4,4)\n",
  1117. "c.scatter_(1, index, b)"
  1118. ]
  1119. },
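{
"cell_type": "markdown",
"metadata": {},
"source": [
"Table 3-2 also lists `index_select`, which is not demonstrated above. Here is a minimal sketch (not from the original notebook), assuming `a` is still the 4x4 tensor used in the gather examples; `idx` is an illustrative name."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"idx = t.LongTensor([0, 2])\n",
"a.index_select(0, idx)  # rows 0 and 2; use dim=1 to pick columns instead"
]
},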
  1120. {
  1121. "cell_type": "markdown",
  1122. "metadata": {},
  1123. "source": [
  1124. "#### Advanced indexing\n",
  1125. "PyTorch's indexing was overhauled in version 0.2 and now supports most of NumPy's advanced indexing[^10]. Advanced indexing can be seen as an extension of ordinary indexing, but its result generally does not share memory with the original tensor (a quick check follows the examples below).\n",
  1126. "[^10]: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing"
  1127. ]
  1128. },
  1129. {
  1130. "cell_type": "code",
  1131. "execution_count": 43,
  1132. "metadata": {},
  1133. "outputs": [
  1134. {
  1135. "data": {
  1136. "text/plain": [
  1137. "\n",
  1138. "(0 ,.,.) = \n",
  1139. " 0 1 2\n",
  1140. " 3 4 5\n",
  1141. " 6 7 8\n",
  1142. "\n",
  1143. "(1 ,.,.) = \n",
  1144. " 9 10 11\n",
  1145. " 12 13 14\n",
  1146. " 15 16 17\n",
  1147. "\n",
  1148. "(2 ,.,.) = \n",
  1149. " 18 19 20\n",
  1150. " 21 22 23\n",
  1151. " 24 25 26\n",
  1152. "[torch.FloatTensor of size 3x3x3]"
  1153. ]
  1154. },
  1155. "execution_count": 43,
  1156. "metadata": {},
  1157. "output_type": "execute_result"
  1158. }
  1159. ],
  1160. "source": [
  1161. "x = t.arange(0,27).view(3,3,3)\n",
  1162. "x"
  1163. ]
  1164. },
  1165. {
  1166. "cell_type": "code",
  1167. "execution_count": 44,
  1168. "metadata": {},
  1169. "outputs": [
  1170. {
  1171. "data": {
  1172. "text/plain": [
  1173. "\n",
  1174. " 14\n",
  1175. " 24\n",
  1176. "[torch.FloatTensor of size 2]"
  1177. ]
  1178. },
  1179. "execution_count": 44,
  1180. "metadata": {},
  1181. "output_type": "execute_result"
  1182. }
  1183. ],
  1184. "source": [
  1185. "x[[1, 2], [1, 2], [2, 0]] # x[1,1,2] and x[2,2,0]"
  1186. ]
  1187. },
  1188. {
  1189. "cell_type": "code",
  1190. "execution_count": 45,
  1191. "metadata": {},
  1192. "outputs": [
  1193. {
  1194. "data": {
  1195. "text/plain": [
  1196. "\n",
  1197. " 19\n",
  1198. " 10\n",
  1199. " 1\n",
  1200. "[torch.FloatTensor of size 3]"
  1201. ]
  1202. },
  1203. "execution_count": 45,
  1204. "metadata": {},
  1205. "output_type": "execute_result"
  1206. }
  1207. ],
  1208. "source": [
  1209. "x[[2, 1, 0], [0], [1]] # x[2,0,1], x[1,0,1], x[0,0,1]"
  1210. ]
  1211. },
  1212. {
  1213. "cell_type": "code",
  1214. "execution_count": 46,
  1215. "metadata": {},
  1216. "outputs": [
  1217. {
  1218. "data": {
  1219. "text/plain": [
  1220. "\n",
  1221. "(0 ,.,.) = \n",
  1222. " 0 1 2\n",
  1223. " 3 4 5\n",
  1224. " 6 7 8\n",
  1225. "\n",
  1226. "(1 ,.,.) = \n",
  1227. " 18 19 20\n",
  1228. " 21 22 23\n",
  1229. " 24 25 26\n",
  1230. "[torch.FloatTensor of size 2x3x3]"
  1231. ]
  1232. },
  1233. "execution_count": 46,
  1234. "metadata": {},
  1235. "output_type": "execute_result"
  1236. }
  1237. ],
  1238. "source": [
  1239. "x[[0, 2], ...] # x[0] and x[2]"
  1240. ]
  1241. },
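{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch (not from the original notebook) of the note above that advanced indexing generally does not share memory with the original tensor: modifying the result leaves `x` unchanged. The name `y` is illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"y = x[[0, 2], ...]   # advanced indexing returns a copy\n",
"y[0, 0, 0] = -1\n",
"x[0, 0, 0]           # still 0: x was not affected"
]
},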
  1242. {
  1243. "cell_type": "markdown",
  1244. "metadata": {},
  1245. "source": [
  1246. "#### Tensor types\n",
  1247. "\n",
  1248. "Tensors come in different data types, listed in table 3-3, and each type has a CPU and a GPU version (except HalfTensor). The default tensor type is FloatTensor; it can be changed with `t.set_default_tensor_type` (if the default is a GPU tensor type, all operations run on the GPU). Knowing the tensor type helps when analysing memory usage. For example, a FloatTensor of size (1000, 1000, 1000) has `1000*1000*1000=10^9` elements, each occupying 32 bit / 8 = 4 bytes, so roughly 4 GB of memory/GPU memory in total. HalfTensor is designed specifically for the GPU: for the same number of elements it uses only half the GPU memory of a FloatTensor, which greatly alleviates out-of-memory problems, but because the range and precision of half-precision numbers are limited[^2], overflow and similar issues can occur.\n",
  1249. "\n",
  1250. "[^2]: https://stackoverflow.com/questions/872544/what-range-of-numbers-can-be-represented-in-a-16-32-and-64-bit-ieee-754-syste\n",
  1251. "\n",
  1252. "Table 3-3: tensor data types\n",
  1253. "\n",
  1254. "Data type|\tCPU tensor\t|GPU tensor|\n",
  1255. ":---:|:---:|:--:|\n",
  1256. "32-bit float|\ttorch.FloatTensor\t|torch.cuda.FloatTensor\n",
  1257. "64-bit float|\ttorch.DoubleTensor|\ttorch.cuda.DoubleTensor\n",
  1258. "16-bit half-precision float|\tN/A\t|torch.cuda.HalfTensor\n",
  1259. "8-bit unsigned integer (0~255)|\ttorch.ByteTensor|\ttorch.cuda.ByteTensor\n",
  1260. "8-bit signed integer (-128~127)|\ttorch.CharTensor\t|torch.cuda.CharTensor\n",
  1261. "16-bit signed integer |\ttorch.ShortTensor|\ttorch.cuda.ShortTensor\n",
  1262. "32-bit signed integer \t|torch.IntTensor\t|torch.cuda.IntTensor\n",
  1263. "64-bit signed integer \t|torch.LongTensor\t|torch.cuda.LongTensor\n",
  1264. "\n",
  1265. "The data types can be converted into one another: `type(new_type)` is the general way, and there are also shortcuts such as `float`, `long` and `half`. Conversion between CPU and GPU tensors is done with `tensor.cuda` and `tensor.cpu`. A tensor also has a `new` method, used like `t.Tensor`, which calls the constructor of the tensor's own type and produces a tensor of the same type as the current one (a small memory-usage check appears after these examples)."
  1266. ]
  1267. },
  1268. {
  1269. "cell_type": "code",
  1270. "execution_count": 47,
  1271. "metadata": {},
  1272. "outputs": [],
  1273. "source": [
  1274. "# set the default tensor type; note that the argument is a string\n",
  1275. "t.set_default_tensor_type('torch.IntTensor')"
  1276. ]
  1277. },
  1278. {
  1279. "cell_type": "code",
  1280. "execution_count": 48,
  1281. "metadata": {},
  1282. "outputs": [
  1283. {
  1284. "data": {
  1285. "text/plain": [
  1286. "\n",
  1287. "-1.7683e+09 2.1918e+04 1.0000e+00\n",
  1288. " 0.0000e+00 1.0000e+00 0.0000e+00\n",
  1289. "[torch.IntTensor of size 2x3]"
  1290. ]
  1291. },
  1292. "execution_count": 48,
  1293. "metadata": {},
  1294. "output_type": "execute_result"
  1295. }
  1296. ],
  1297. "source": [
  1298. "a = t.Tensor(2,3)\n",
  1299. "a # a is now an IntTensor"
  1300. ]
  1301. },
  1302. {
  1303. "cell_type": "code",
  1304. "execution_count": 49,
  1305. "metadata": {},
  1306. "outputs": [
  1307. {
  1308. "data": {
  1309. "text/plain": [
  1310. "\n",
  1311. "-1.7683e+09 2.1918e+04 1.0000e+00\n",
  1312. " 0.0000e+00 1.0000e+00 0.0000e+00\n",
  1313. "[torch.FloatTensor of size 2x3]"
  1314. ]
  1315. },
  1316. "execution_count": 49,
  1317. "metadata": {},
  1318. "output_type": "execute_result"
  1319. }
  1320. ],
  1321. "source": [
  1322. "# convert a to a FloatTensor; equivalent to b = a.type(t.FloatTensor)\n",
  1323. "b = a.float() \n",
  1324. "b"
  1325. ]
  1326. },
  1327. {
  1328. "cell_type": "code",
  1329. "execution_count": 50,
  1330. "metadata": {
  1331. "scrolled": true
  1332. },
  1333. "outputs": [
  1334. {
  1335. "data": {
  1336. "text/plain": [
  1337. "\n",
  1338. "-1.7683e+09 2.1918e+04 1.0000e+00\n",
  1339. " 0.0000e+00 1.0000e+00 0.0000e+00\n",
  1340. "[torch.FloatTensor of size 2x3]"
  1341. ]
  1342. },
  1343. "execution_count": 50,
  1344. "metadata": {},
  1345. "output_type": "execute_result"
  1346. }
  1347. ],
  1348. "source": [
  1349. "c = a.type_as(b)\n",
  1350. "c"
  1351. ]
  1352. },
  1353. {
  1354. "cell_type": "code",
  1355. "execution_count": 51,
  1356. "metadata": {},
  1357. "outputs": [
  1358. {
  1359. "data": {
  1360. "text/plain": [
  1361. "\n",
  1362. "-1.7682e+09 2.1918e+04 3.0000e+00\n",
  1363. " 0.0000e+00 1.0000e+00 0.0000e+00\n",
  1364. "[torch.IntTensor of size 2x3]"
  1365. ]
  1366. },
  1367. "execution_count": 51,
  1368. "metadata": {},
  1369. "output_type": "execute_result"
  1370. }
  1371. ],
  1372. "source": [
  1373. "d = a.new(2,3) # equivalent to torch.IntTensor(2,3)\n",
  1374. "d"
  1375. ]
  1376. },
  1377. {
  1378. "cell_type": "code",
  1379. "execution_count": 52,
  1380. "metadata": {},
  1381. "outputs": [
  1382. {
  1383. "data": {
  1384. "text/plain": [
  1385. "\u001b[0;31mSignature:\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnew\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  1386. "\u001b[0;31mSource:\u001b[0m \n",
  1387. " \u001b[0;32mdef\u001b[0m \u001b[0mnew\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\n",
  1388. "\u001b[0;34m\u001b[0m \u001b[0;34mr\"\"\"Constructs a new tensor of the same data type as :attr:`self` tensor.\u001b[0m\n",
  1389. "\u001b[0;34m\u001b[0m\n",
  1390. "\u001b[0;34m Any valid argument combination to the tensor constructor is accepted by\u001b[0m\n",
  1391. "\u001b[0;34m this method, including sizes, :class:`torch.Storage`, NumPy ndarray,\u001b[0m\n",
  1392. "\u001b[0;34m Python Sequence, etc. See :ref:`torch.Tensor <tensor-doc>` for more\u001b[0m\n",
  1393. "\u001b[0;34m details.\u001b[0m\n",
  1394. "\u001b[0;34m\u001b[0m\n",
  1395. "\u001b[0;34m .. note:: For CUDA tensors, this method will create new tensor on the\u001b[0m\n",
  1396. "\u001b[0;34m same device as this tensor.\u001b[0m\n",
  1397. "\u001b[0;34m \"\"\"\u001b[0m\u001b[0;34m\u001b[0m\n",
  1398. "\u001b[0;34m\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__class__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  1399. "\u001b[0;31mFile:\u001b[0m /usr/lib/python3.6/site-packages/torch/tensor.py\n",
  1400. "\u001b[0;31mType:\u001b[0m method\n"
  1401. ]
  1402. },
  1403. "metadata": {},
  1404. "output_type": "display_data"
  1405. }
  1406. ],
  1407. "source": [
  1408. "# view the source code of the new method\n",
  1409. "a.new??"
  1410. ]
  1411. },
  1412. {
  1413. "cell_type": "code",
  1414. "execution_count": 53,
  1415. "metadata": {},
  1416. "outputs": [],
  1417. "source": [
  1418. "# restore the previous default type\n",
  1419. "t.set_default_tensor_type('torch.FloatTensor')"
  1420. ]
  1421. },
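{
"cell_type": "markdown",
"metadata": {},
"source": [
"The memory estimate above (4 bytes per float element) can be checked with `element_size()` and `nelement()`. The following is a small sketch (not from the original notebook) on a modest 1000x1000 FloatTensor rather than the (1000, 1000, 1000) example; `f` is an illustrative name."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"f = t.ones(1000, 1000)           # FloatTensor: 4 bytes per element\n",
"f.element_size() * f.nelement()  # about 4e6 bytes, i.e. roughly 4 MB"
]
},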
  1422. {
  1423. "cell_type": "markdown",
  1424. "metadata": {},
  1425. "source": [
  1426. "#### Element-wise operations\n",
  1427. "\n",
  1428. "These operations act on every element of a tensor independently (point-wise, also called element-wise), so the output has the same shape as the input. Common ones are listed in table 3-4.\n",
  1429. "\n",
  1430. "Table 3-4: common element-wise operations\n",
  1431. "\n",
  1432. "|Function|Purpose|\n",
  1433. "|:--:|:--:|\n",
  1434. "|abs/sqrt/div/exp/fmod/log/pow..|absolute value/square root/division/exponential/remainder/logarithm/power..|\n",
  1435. "|cos/sin/asin/atan2/cosh..|trigonometric functions|\n",
  1436. "|ceil/round/floor/trunc| ceiling/rounding/floor/truncate to the integer part|\n",
  1437. "|clamp(input, min, max)|clip values that fall outside min and max|\n",
  1438. "|sigmoid/tanh..|activation functions\n",
  1439. "\n",
  1440. "For many operations, such as div, mul, pow and fmod, PyTorch overloads the corresponding operators, so they can be used directly: `a ** 2` is equivalent to `torch.pow(a,2)`, and `a * 2` is equivalent to `torch.mul(a,2)`.\n",
  1441. "\n",
  1442. "The output of `clamp(x, min, max)` satisfies:\n",
  1443. "$$\n",
  1444. "y_i =\n",
  1445. "\\begin{cases}\n",
  1446. "min, & \\text{if } x_i \\lt min \\\\\n",
  1447. "x_i, & \\text{if } min \\le x_i \\le max \\\\\n",
  1448. "max, & \\text{if } x_i \\gt max\\\\\n",
  1449. "\\end{cases}\n",
  1450. "$$\n",
  1451. "`clamp` is often used where values must be compared, e.g. taking the larger of each element of a tensor and some fixed number."
  1452. ]
  1453. },
  1454. {
  1455. "cell_type": "code",
  1456. "execution_count": 54,
  1457. "metadata": {
  1458. "scrolled": true
  1459. },
  1460. "outputs": [
  1461. {
  1462. "data": {
  1463. "text/plain": [
  1464. "\n",
  1465. " 1.0000 0.5403 -0.4161\n",
  1466. "-0.9900 -0.6536 0.2837\n",
  1467. "[torch.FloatTensor of size 2x3]"
  1468. ]
  1469. },
  1470. "execution_count": 54,
  1471. "metadata": {},
  1472. "output_type": "execute_result"
  1473. }
  1474. ],
  1475. "source": [
  1476. "a = t.arange(0, 6).view(2, 3)\n",
  1477. "t.cos(a)"
  1478. ]
  1479. },
  1480. {
  1481. "cell_type": "code",
  1482. "execution_count": 55,
  1483. "metadata": {},
  1484. "outputs": [
  1485. {
  1486. "data": {
  1487. "text/plain": [
  1488. "\n",
  1489. " 0 1 2\n",
  1490. " 0 1 2\n",
  1491. "[torch.FloatTensor of size 2x3]"
  1492. ]
  1493. },
  1494. "execution_count": 55,
  1495. "metadata": {},
  1496. "output_type": "execute_result"
  1497. }
  1498. ],
  1499. "source": [
  1500. "a % 3 # equivalent to t.fmod(a, 3)"
  1501. ]
  1502. },
  1503. {
  1504. "cell_type": "code",
  1505. "execution_count": 56,
  1506. "metadata": {},
  1507. "outputs": [
  1508. {
  1509. "data": {
  1510. "text/plain": [
  1511. "\n",
  1512. " 0 1 4\n",
  1513. " 9 16 25\n",
  1514. "[torch.FloatTensor of size 2x3]"
  1515. ]
  1516. },
  1517. "execution_count": 56,
  1518. "metadata": {},
  1519. "output_type": "execute_result"
  1520. }
  1521. ],
  1522. "source": [
  1523. "a ** 2 # equivalent to t.pow(a, 2)"
  1524. ]
  1525. },
  1526. {
  1527. "cell_type": "code",
  1528. "execution_count": 57,
  1529. "metadata": {},
  1530. "outputs": [
  1531. {
  1532. "name": "stdout",
  1533. "output_type": "stream",
  1534. "text": [
  1535. "\n",
  1536. " 0 1 2\n",
  1537. " 3 4 5\n",
  1538. "[torch.FloatTensor of size 2x3]\n",
  1539. "\n"
  1540. ]
  1541. },
  1542. {
  1543. "data": {
  1544. "text/plain": [
  1545. "\n",
  1546. " 3 3 3\n",
  1547. " 3 4 5\n",
  1548. "[torch.FloatTensor of size 2x3]"
  1549. ]
  1550. },
  1551. "execution_count": 57,
  1552. "metadata": {},
  1553. "output_type": "execute_result"
  1554. }
  1555. ],
  1556. "source": [
  1557. "# take the larger of each element of a and 3 (elements smaller than 3 are clipped to 3)\n",
  1558. "print(a)\n",
  1559. "t.clamp(a, min=3)"
  1560. ]
  1561. },
  1562. {
  1563. "cell_type": "markdown",
  1564. "metadata": {},
  1565. "source": [
  1566. "#### Reduction operations \n",
  1567. "These operations produce an output that is smaller than the input and can be applied along a chosen dimension. For example `sum` can compute the sum of the whole tensor, or the sum of each row or each column. Common reduction operations are listed in table 3-5.\n",
  1568. "\n",
  1569. "Table 3-5: common reduction operations\n",
  1570. "\n",
  1571. "|Function|Purpose|\n",
  1572. "|:---:|:---:|\n",
  1573. "|mean/sum/median/mode|mean/sum/median/mode|\n",
  1574. "|norm/dist|norm/distance|\n",
  1575. "|std/var|standard deviation/variance|\n",
  1576. "|cumsum/cumprod|cumulative sum/cumulative product|\n",
  1577. "\n",
  1578. "Most of these functions take a **`dim`** argument that specifies along which dimension the reduction is performed. Explanations of dim (the counterpart of NumPy's axis) vary; here is a simple way to remember it:\n",
  1579. "\n",
  1580. "Suppose the input has shape (m, n, k).\n",
  1581. "\n",
  1582. "- With dim=0, the output has shape (1, n, k) or (n, k)\n",
  1583. "- With dim=1, the output has shape (m, 1, k) or (m, k)\n",
  1584. "- With dim=2, the output has shape (m, n, 1) or (m, n)\n",
  1585. "\n",
  1586. "Whether the size-1 dimension is kept depends on the `keepdim` argument: `keepdim=True` keeps the dimension of size `1`. Note that this is only a rule of thumb; not every function changes shape this way, e.g. `cumsum` (a few more reductions are sketched after the cumsum example below)."
  1587. ]
  1588. },
  1589. {
  1590. "cell_type": "code",
  1591. "execution_count": 58,
  1592. "metadata": {},
  1593. "outputs": [
  1594. {
  1595. "data": {
  1596. "text/plain": [
  1597. "\n",
  1598. " 2 2 2\n",
  1599. "[torch.FloatTensor of size 1x3]"
  1600. ]
  1601. },
  1602. "execution_count": 58,
  1603. "metadata": {},
  1604. "output_type": "execute_result"
  1605. }
  1606. ],
  1607. "source": [
  1608. "b = t.ones(2, 3)\n",
  1609. "b.sum(dim = 0, keepdim=True)"
  1610. ]
  1611. },
  1612. {
  1613. "cell_type": "code",
  1614. "execution_count": 59,
  1615. "metadata": {},
  1616. "outputs": [
  1617. {
  1618. "data": {
  1619. "text/plain": [
  1620. "\n",
  1621. " 2\n",
  1622. " 2\n",
  1623. " 2\n",
  1624. "[torch.FloatTensor of size 3]"
  1625. ]
  1626. },
  1627. "execution_count": 59,
  1628. "metadata": {},
  1629. "output_type": "execute_result"
  1630. }
  1631. ],
  1632. "source": [
  1633. "# keepdim=False: the size-1 dimension is not kept; note the shape\n",
  1634. "b.sum(dim=0, keepdim=False)"
  1635. ]
  1636. },
  1637. {
  1638. "cell_type": "code",
  1639. "execution_count": 60,
  1640. "metadata": {},
  1641. "outputs": [
  1642. {
  1643. "data": {
  1644. "text/plain": [
  1645. "\n",
  1646. " 3\n",
  1647. " 3\n",
  1648. "[torch.FloatTensor of size 2]"
  1649. ]
  1650. },
  1651. "execution_count": 60,
  1652. "metadata": {},
  1653. "output_type": "execute_result"
  1654. }
  1655. ],
  1656. "source": [
  1657. "b.sum(dim=1)"
  1658. ]
  1659. },
  1660. {
  1661. "cell_type": "code",
  1662. "execution_count": 61,
  1663. "metadata": {},
  1664. "outputs": [
  1665. {
  1666. "name": "stdout",
  1667. "output_type": "stream",
  1668. "text": [
  1669. "\n",
  1670. " 0 1 2\n",
  1671. " 3 4 5\n",
  1672. "[torch.FloatTensor of size 2x3]\n",
  1673. "\n"
  1674. ]
  1675. },
  1676. {
  1677. "data": {
  1678. "text/plain": [
  1679. "\n",
  1680. " 0 1 3\n",
  1681. " 3 7 12\n",
  1682. "[torch.FloatTensor of size 2x3]"
  1683. ]
  1684. },
  1685. "execution_count": 61,
  1686. "metadata": {},
  1687. "output_type": "execute_result"
  1688. }
  1689. ],
  1690. "source": [
  1691. "a = t.arange(0, 6).view(2, 3)\n",
  1692. "print(a)\n",
  1693. "a.cumsum(dim=1) # cumulative sum along each row"
  1694. ]
  1695. },
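{
"cell_type": "markdown",
"metadata": {},
"source": [
"A few more reductions from table 3-5, as a minimal sketch (not from the original notebook) on a small float tensor; `m` is an illustrative name."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"m = t.Tensor([[1, 2, 3], [4, 5, 6]])\n",
"m.mean(dim=0), m.std(dim=1), m.norm(2)  # column means, per-row std, overall 2-norm"
]
},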
  1696. {
  1697. "cell_type": "markdown",
  1698. "metadata": {},
  1699. "source": [
  1700. "#### Comparison\n",
  1701. "Some comparison functions work element by element, like the element-wise operations; others behave more like reductions. Common comparison functions are listed in table 3-6.\n",
  1702. "\n",
  1703. "Table 3-6: common comparison functions\n",
  1704. "\n",
  1705. "|Function|Purpose|\n",
  1706. "|:--:|:--:|\n",
  1707. "|gt/lt/ge/le/eq/ne|greater/less/greater or equal/less or equal/equal/not equal|\n",
  1708. "|topk|the k largest elements|\n",
  1709. "|sort|sorting|\n",
  1710. "|max/min|maximum/minimum, including the element-wise comparison of two tensors|\n",
  1711. "\n",
  1712. "The operations in the first row of the table have overloaded operators, so `a>=b`, `a>b`, `a!=b` and `a==b` can be used directly; they return a `ByteTensor` that can be used to select elements. max/min are special; taking max as an example, it can be used in three ways:\n",
  1713. "- t.max(tensor): returns the single largest element of the tensor\n",
  1714. "- t.max(tensor, dim): the largest elements along the given dimension; returns both the values and their indices\n",
  1715. "- t.max(tensor1, tensor2): the element-wise maximum of two tensors\n",
  1716. "\n",
  1717. "To compare a tensor against a single number, use clamp. Examples follow (including a short sort/topk sketch after the clamp example)."
  1718. ]
  1719. },
  1720. {
  1721. "cell_type": "code",
  1722. "execution_count": 62,
  1723. "metadata": {},
  1724. "outputs": [
  1725. {
  1726. "data": {
  1727. "text/plain": [
  1728. "\n",
  1729. " 0 3 6\n",
  1730. " 9 12 15\n",
  1731. "[torch.FloatTensor of size 2x3]"
  1732. ]
  1733. },
  1734. "execution_count": 62,
  1735. "metadata": {},
  1736. "output_type": "execute_result"
  1737. }
  1738. ],
  1739. "source": [
  1740. "a = t.linspace(0, 15, 6).view(2, 3)\n",
  1741. "a"
  1742. ]
  1743. },
  1744. {
  1745. "cell_type": "code",
  1746. "execution_count": 63,
  1747. "metadata": {},
  1748. "outputs": [
  1749. {
  1750. "data": {
  1751. "text/plain": [
  1752. "\n",
  1753. " 15 12 9\n",
  1754. " 6 3 0\n",
  1755. "[torch.FloatTensor of size 2x3]"
  1756. ]
  1757. },
  1758. "execution_count": 63,
  1759. "metadata": {},
  1760. "output_type": "execute_result"
  1761. }
  1762. ],
  1763. "source": [
  1764. "b = t.linspace(15, 0, 6).view(2, 3)\n",
  1765. "b"
  1766. ]
  1767. },
  1768. {
  1769. "cell_type": "code",
  1770. "execution_count": 64,
  1771. "metadata": {},
  1772. "outputs": [
  1773. {
  1774. "data": {
  1775. "text/plain": [
  1776. "\n",
  1777. " 0 0 0\n",
  1778. " 1 1 1\n",
  1779. "[torch.ByteTensor of size 2x3]"
  1780. ]
  1781. },
  1782. "execution_count": 64,
  1783. "metadata": {},
  1784. "output_type": "execute_result"
  1785. }
  1786. ],
  1787. "source": [
  1788. "a>b"
  1789. ]
  1790. },
  1791. {
  1792. "cell_type": "code",
  1793. "execution_count": 65,
  1794. "metadata": {
  1795. "scrolled": true
  1796. },
  1797. "outputs": [
  1798. {
  1799. "data": {
  1800. "text/plain": [
  1801. "\n",
  1802. " 9\n",
  1803. " 12\n",
  1804. " 15\n",
  1805. "[torch.FloatTensor of size 3]"
  1806. ]
  1807. },
  1808. "execution_count": 65,
  1809. "metadata": {},
  1810. "output_type": "execute_result"
  1811. }
  1812. ],
  1813. "source": [
  1814. "a[a>b] # elements of a that are greater than the corresponding elements of b"
  1815. ]
  1816. },
  1817. {
  1818. "cell_type": "code",
  1819. "execution_count": 66,
  1820. "metadata": {},
  1821. "outputs": [
  1822. {
  1823. "data": {
  1824. "text/plain": [
  1825. "15.0"
  1826. ]
  1827. },
  1828. "execution_count": 66,
  1829. "metadata": {},
  1830. "output_type": "execute_result"
  1831. }
  1832. ],
  1833. "source": [
  1834. "t.max(a)"
  1835. ]
  1836. },
  1837. {
  1838. "cell_type": "code",
  1839. "execution_count": 67,
  1840. "metadata": {},
  1841. "outputs": [
  1842. {
  1843. "data": {
  1844. "text/plain": [
  1845. "(\n",
  1846. " 15\n",
  1847. " 6\n",
  1848. " [torch.FloatTensor of size 2], \n",
  1849. " 0\n",
  1850. " 0\n",
  1851. " [torch.LongTensor of size 2])"
  1852. ]
  1853. },
  1854. "execution_count": 67,
  1855. "metadata": {},
  1856. "output_type": "execute_result"
  1857. }
  1858. ],
  1859. "source": [
  1860. "t.max(b, dim=1) \n",
  1861. "# in the first return value, 15 and 6 are the largest elements of row 0 and row 1\n",
  1862. "# in the second return value, 0 and 0 mean those maxima are the 0th element of each row"
  1863. ]
  1864. },
  1865. {
  1866. "cell_type": "code",
  1867. "execution_count": 68,
  1868. "metadata": {
  1869. "scrolled": true
  1870. },
  1871. "outputs": [
  1872. {
  1873. "data": {
  1874. "text/plain": [
  1875. "\n",
  1876. " 15 12 9\n",
  1877. " 9 12 15\n",
  1878. "[torch.FloatTensor of size 2x3]"
  1879. ]
  1880. },
  1881. "execution_count": 68,
  1882. "metadata": {},
  1883. "output_type": "execute_result"
  1884. }
  1885. ],
  1886. "source": [
  1887. "t.max(a,b)"
  1888. ]
  1889. },
  1890. {
  1891. "cell_type": "code",
  1892. "execution_count": 69,
  1893. "metadata": {},
  1894. "outputs": [
  1895. {
  1896. "data": {
  1897. "text/plain": [
  1898. "\n",
  1899. " 10 10 10\n",
  1900. " 10 12 15\n",
  1901. "[torch.FloatTensor of size 2x3]"
  1902. ]
  1903. },
  1904. "execution_count": 69,
  1905. "metadata": {},
  1906. "output_type": "execute_result"
  1907. }
  1908. ],
  1909. "source": [
  1910. "# take the larger of each element of a and 10\n",
  1911. "t.clamp(a, min=10)"
  1912. ]
  1913. },
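{
"cell_type": "markdown",
"metadata": {},
"source": [
"A short sketch (not from the original notebook) of `sort` and `topk` from table 3-6, applied to the tensor `b` defined above; `sorted_b` and `idx` are illustrative names."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sorted_b, idx = t.sort(b, dim=1)  # sorted values and their original indices\n",
"print(sorted_b)\n",
"b.topk(2, dim=1)                  # the 2 largest elements of each row, with indices"
]
},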
  1914. {
  1915. "cell_type": "markdown",
  1916. "metadata": {},
  1917. "source": [
  1918. "#### Linear algebra\n",
  1919. "\n",
  1920. "PyTorch's linear algebra functions mainly wrap BLAS and LAPACK, and their usage and interfaces are similar. Common linear algebra functions are listed in table 3-7.\n",
  1921. "\n",
  1922. "Table 3-7: common linear algebra functions\n",
  1923. "\n",
  1924. "|Function|Purpose|\n",
  1925. "|:---:|:---:|\n",
  1926. "|trace|sum of the diagonal elements (the trace of a matrix)|\n",
  1927. "|diag|diagonal elements|\n",
  1928. "|triu/tril|upper/lower triangular part of a matrix, with an optional offset|\n",
  1929. "|mm/bmm|matrix multiplication, batched matrix multiplication|\n",
  1930. "|addmm/addbmm/addmv/addr/baddbmm..|matrix operations\n",
  1931. "|t|transpose|\n",
  1932. "|dot/cross|dot product/cross product\n",
  1933. "|inverse|matrix inverse\n",
  1934. "|svd|singular value decomposition\n",
  1935. "\n",
  1936. "See the official documentation[^3] for details. Note that transposing a matrix makes its storage non-contiguous; call its `.contiguous` method to make it contiguous again (a short mm/trace sketch follows the contiguous example below).\n",
  1937. "[^3]: http://pytorch.org/docs/torch.html#blas-and-lapack-operations"
  1938. ]
  1939. },
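{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick, non-authoritative sketch (the matrices `m` and `n` below are made up for illustration and are not used elsewhere), the next cell exercises a few entries from Table 3-7: `mm`, `trace`, `diag` and `svd`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"m = t.Tensor([[1, 2], [3, 4]])\n",
"n = t.Tensor([[5, 6], [7, 8]])\n",
"print(m.mm(n))      # matrix product of m and n\n",
"print(m.trace())    # sum of the diagonal: 1 + 4 = 5\n",
"print(t.diag(m))    # diagonal elements as a 1-D tensor\n",
"u, s, v = t.svd(m)  # singular value decomposition\n",
"s                   # singular values in descending order"
]
},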
  1940. {
  1941. "cell_type": "code",
  1942. "execution_count": 70,
  1943. "metadata": {},
  1944. "outputs": [
  1945. {
  1946. "data": {
  1947. "text/plain": [
  1948. "False"
  1949. ]
  1950. },
  1951. "execution_count": 70,
  1952. "metadata": {},
  1953. "output_type": "execute_result"
  1954. }
  1955. ],
  1956. "source": [
  1957. "b = a.t()\n",
  1958. "b.is_contiguous()"
  1959. ]
  1960. },
  1961. {
  1962. "cell_type": "code",
  1963. "execution_count": 71,
  1964. "metadata": {},
  1965. "outputs": [
  1966. {
  1967. "data": {
  1968. "text/plain": [
  1969. "\n",
  1970. " 0 9\n",
  1971. " 3 12\n",
  1972. " 6 15\n",
  1973. "[torch.FloatTensor of size 3x2]"
  1974. ]
  1975. },
  1976. "execution_count": 71,
  1977. "metadata": {},
  1978. "output_type": "execute_result"
  1979. }
  1980. ],
  1981. "source": [
  1982. "b.contiguous()"
  1983. ]
  1984. },
  1985. {
  1986. "cell_type": "markdown",
  1987. "metadata": {},
  1988. "source": [
1989. "### 3.1.2 Tensor and Numpy\n",
1990. "\n",
1991. "Tensors and Numpy arrays are very similar, and converting between them is simple and efficient. Note that such conversions share memory (as long as the dtypes match, see below). Since Numpy is mature and supports a rich set of operations, when you run into something a Tensor cannot do you can convert to a Numpy array, process it there, and convert back to a tensor; the conversion overhead is very small."
  1992. ]
  1993. },
  1994. {
  1995. "cell_type": "code",
  1996. "execution_count": 72,
  1997. "metadata": {},
  1998. "outputs": [
  1999. {
  2000. "data": {
  2001. "text/plain": [
  2002. "array([[1., 1., 1.],\n",
  2003. " [1., 1., 1.]], dtype=float32)"
  2004. ]
  2005. },
  2006. "execution_count": 72,
  2007. "metadata": {},
  2008. "output_type": "execute_result"
  2009. }
  2010. ],
  2011. "source": [
  2012. "import numpy as np\n",
  2013. "a = np.ones([2, 3],dtype=np.float32)\n",
  2014. "a"
  2015. ]
  2016. },
  2017. {
  2018. "cell_type": "code",
  2019. "execution_count": 73,
  2020. "metadata": {},
  2021. "outputs": [
  2022. {
  2023. "data": {
  2024. "text/plain": [
  2025. "\n",
  2026. " 1 1 1\n",
  2027. " 1 1 1\n",
  2028. "[torch.FloatTensor of size 2x3]"
  2029. ]
  2030. },
  2031. "execution_count": 73,
  2032. "metadata": {},
  2033. "output_type": "execute_result"
  2034. }
  2035. ],
  2036. "source": [
  2037. "b = t.from_numpy(a)\n",
  2038. "b"
  2039. ]
  2040. },
  2041. {
  2042. "cell_type": "code",
  2043. "execution_count": 74,
  2044. "metadata": {},
  2045. "outputs": [
  2046. {
  2047. "data": {
  2048. "text/plain": [
  2049. "\n",
  2050. " 1 1 1\n",
  2051. " 1 1 1\n",
  2052. "[torch.FloatTensor of size 2x3]"
  2053. ]
  2054. },
  2055. "execution_count": 74,
  2056. "metadata": {},
  2057. "output_type": "execute_result"
  2058. }
  2059. ],
  2060. "source": [
2061. "b = t.Tensor(a) # a numpy array can also be passed directly to Tensor\n",
  2062. "b"
  2063. ]
  2064. },
  2065. {
  2066. "cell_type": "code",
  2067. "execution_count": 75,
  2068. "metadata": {
  2069. "scrolled": true
  2070. },
  2071. "outputs": [
  2072. {
  2073. "data": {
  2074. "text/plain": [
  2075. "\n",
  2076. " 1 100 1\n",
  2077. " 1 1 1\n",
  2078. "[torch.FloatTensor of size 2x3]"
  2079. ]
  2080. },
  2081. "execution_count": 75,
  2082. "metadata": {},
  2083. "output_type": "execute_result"
  2084. }
  2085. ],
  2086. "source": [
  2087. "a[0, 1]=100\n",
  2088. "b"
  2089. ]
  2090. },
  2091. {
  2092. "cell_type": "code",
  2093. "execution_count": 76,
  2094. "metadata": {},
  2095. "outputs": [
  2096. {
  2097. "data": {
  2098. "text/plain": [
  2099. "array([[ 1., 100., 1.],\n",
  2100. " [ 1., 1., 1.]], dtype=float32)"
  2101. ]
  2102. },
  2103. "execution_count": 76,
  2104. "metadata": {},
  2105. "output_type": "execute_result"
  2106. }
  2107. ],
  2108. "source": [
2109. "c = b.numpy() # a, b and c share the same memory\n",
  2110. "c"
  2111. ]
  2112. },
  2113. {
  2114. "cell_type": "markdown",
  2115. "metadata": {},
  2116. "source": [
2117. "**Note**: when the numpy dtype does not match the Tensor's type, the data is copied and memory is not shared."
  2118. ]
  2119. },
  2120. {
  2121. "cell_type": "code",
  2122. "execution_count": 77,
  2123. "metadata": {},
  2124. "outputs": [
  2125. {
  2126. "data": {
  2127. "text/plain": [
  2128. "array([[1., 1., 1.],\n",
  2129. " [1., 1., 1.]])"
  2130. ]
  2131. },
  2132. "execution_count": 77,
  2133. "metadata": {},
  2134. "output_type": "execute_result"
  2135. }
  2136. ],
  2137. "source": [
  2138. "a = np.ones([2, 3])\n",
2139. "a # note the difference from the earlier a (the dtype here is float64, not float32)"
  2140. ]
  2141. },
  2142. {
  2143. "cell_type": "code",
  2144. "execution_count": 78,
  2145. "metadata": {},
  2146. "outputs": [
  2147. {
  2148. "data": {
  2149. "text/plain": [
  2150. "\n",
  2151. " 1 1 1\n",
  2152. " 1 1 1\n",
  2153. "[torch.FloatTensor of size 2x3]"
  2154. ]
  2155. },
  2156. "execution_count": 78,
  2157. "metadata": {},
  2158. "output_type": "execute_result"
  2159. }
  2160. ],
  2161. "source": [
2162. "b = t.Tensor(a) # t.Tensor always builds a FloatTensor (float32), so the float64 data is copied here\n",
  2163. "b"
  2164. ]
  2165. },
  2166. {
  2167. "cell_type": "code",
  2168. "execution_count": 79,
  2169. "metadata": {},
  2170. "outputs": [
  2171. {
  2172. "data": {
  2173. "text/plain": [
  2174. "\n",
  2175. " 1 1 1\n",
  2176. " 1 1 1\n",
  2177. "[torch.DoubleTensor of size 2x3]"
  2178. ]
  2179. },
  2180. "execution_count": 79,
  2181. "metadata": {},
  2182. "output_type": "execute_result"
  2183. }
  2184. ],
  2185. "source": [
2186. "c = t.from_numpy(a) # note c's type (DoubleTensor, matching a's float64 dtype)\n",
  2187. "c"
  2188. ]
  2189. },
  2190. {
  2191. "cell_type": "code",
  2192. "execution_count": 80,
  2193. "metadata": {},
  2194. "outputs": [
  2195. {
  2196. "data": {
  2197. "text/plain": [
  2198. "\n",
  2199. " 1 1 1\n",
  2200. " 1 1 1\n",
  2201. "[torch.FloatTensor of size 2x3]"
  2202. ]
  2203. },
  2204. "execution_count": 80,
  2205. "metadata": {},
  2206. "output_type": "execute_result"
  2207. }
  2208. ],
  2209. "source": [
  2210. "a[0, 1] = 100\n",
2211. "b # b does not share memory with a, so b stays unchanged even though a changed"
  2212. ]
  2213. },
  2214. {
  2215. "cell_type": "code",
  2216. "execution_count": 81,
  2217. "metadata": {},
  2218. "outputs": [
  2219. {
  2220. "data": {
  2221. "text/plain": [
  2222. "\n",
  2223. " 1 100 1\n",
  2224. " 1 1 1\n",
  2225. "[torch.DoubleTensor of size 2x3]"
  2226. ]
  2227. },
  2228. "execution_count": 81,
  2229. "metadata": {},
  2230. "output_type": "execute_result"
  2231. }
  2232. ],
  2233. "source": [
2234. "c # c shares memory with a"
  2235. ]
  2236. },
  2237. {
  2238. "cell_type": "markdown",
  2239. "metadata": {},
  2240. "source": [
2241. "Broadcasting is a trick used throughout scientific computing: it lets vectorized operations run fast without allocating extra memory (CPU or GPU).\n",
2242. "Numpy defines the broadcasting rules as follows:\n",
2243. "\n",
2244. "- all inputs are aligned to the array with the longest shape; missing leading dimensions are padded with 1\n",
2245. "- along each dimension, the two arrays must either have the same length or one of them must have length 1, otherwise they cannot be combined\n",
2246. "- wherever an input has length 1 along a dimension, it is (virtually) replicated along that dimension until the shapes match\n",
2247. "\n",
2248. "PyTorch already supports automatic broadcasting, but the author still recommends carrying it out by hand with the following two functions, which is more explicit and less error-prone:\n",
2249. "- `unsqueeze` or `view`: insert a dimension of size 1, implementing rule 1\n",
2250. "- `expand` or `expand_as`: repeat the array, implementing rule 3; this does not copy the data, so it takes no extra space\n",
2251. "\n",
2252. "Note that `repeat` offers functionality similar to `expand`, but `repeat` physically copies the data several times and therefore does use extra space; a short sketch contrasting the two follows this cell."
  2254. ]
  2255. },
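{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small illustrative sketch of the last point (the tensor `x` below is hypothetical and not reused later): `expand` and `repeat` produce the same shape, but `repeat` materializes a new storage containing the copied data, while `expand` keeps pointing at the original elements."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x = t.ones(3, 1)\n",
"y1 = x.expand(3, 4)  # no copy: still backed by the original 3 elements\n",
"y2 = x.repeat(1, 4)  # real copy: a new storage holding 3*4=12 elements\n",
"len(y1.storage()), len(y2.storage())"
]
},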
  2256. {
  2257. "cell_type": "code",
  2258. "execution_count": 82,
  2259. "metadata": {
  2260. "scrolled": true
  2261. },
  2262. "outputs": [],
  2263. "source": [
  2264. "a = t.ones(3, 2)\n",
  2265. "b = t.zeros(2, 3,1)"
  2266. ]
  2267. },
  2268. {
  2269. "cell_type": "code",
  2270. "execution_count": 83,
  2271. "metadata": {
  2272. "scrolled": true
  2273. },
  2274. "outputs": [
  2275. {
  2276. "data": {
  2277. "text/plain": [
  2278. "\n",
  2279. "(0 ,.,.) = \n",
  2280. " 1 1\n",
  2281. " 1 1\n",
  2282. " 1 1\n",
  2283. "\n",
  2284. "(1 ,.,.) = \n",
  2285. " 1 1\n",
  2286. " 1 1\n",
  2287. " 1 1\n",
  2288. "[torch.FloatTensor of size 2x3x2]"
  2289. ]
  2290. },
  2291. "execution_count": 83,
  2292. "metadata": {},
  2293. "output_type": "execute_result"
  2294. }
  2295. ],
  2296. "source": [
2297. "# automatic broadcasting\n",
2298. "# step 1: a is 2-D and b is 3-D, so a (the smaller one) is padded with a leading 1,\n",
2299. "# i.e. a.unsqueeze(0): a becomes (1,3,2) while b is (2,3,1),\n",
2300. "# step 2: a and b differ in the first and third dimensions, where one of them has length 1,\n",
2301. "# so broadcasting expands both shapes to (2,3,2)\n",
  2302. "a+b"
  2303. ]
  2304. },
  2305. {
  2306. "cell_type": "code",
  2307. "execution_count": 84,
  2308. "metadata": {},
  2309. "outputs": [
  2310. {
  2311. "data": {
  2312. "text/plain": [
  2313. "\n",
  2314. "(0 ,.,.) = \n",
  2315. " 1 1\n",
  2316. " 1 1\n",
  2317. " 1 1\n",
  2318. "\n",
  2319. "(1 ,.,.) = \n",
  2320. " 1 1\n",
  2321. " 1 1\n",
  2322. " 1 1\n",
  2323. "[torch.FloatTensor of size 2x3x2]"
  2324. ]
  2325. },
  2326. "execution_count": 84,
  2327. "metadata": {},
  2328. "output_type": "execute_result"
  2329. }
  2330. ],
  2331. "source": [
2332. "# manual broadcasting\n",
2333. "# or: a.view(1,3,2).expand(2,3,2)+b.expand(2,3,2)\n",
  2334. "a.unsqueeze(0).expand(2, 3, 2) + b.expand(2,3,2)"
  2335. ]
  2336. },
  2337. {
  2338. "cell_type": "code",
  2339. "execution_count": 85,
  2340. "metadata": {},
  2341. "outputs": [],
  2342. "source": [
2343. "# expand takes no extra space; values are only materialized when needed, which can save a lot of memory\n",
  2344. "e = a.unsqueeze(0).expand(10000000000000, 3,2)"
  2345. ]
  2346. },
  2347. {
  2348. "cell_type": "markdown",
  2349. "metadata": {},
  2350. "source": [
2351. "### 3.1.3 Internal structure\n",
2352. "\n",
2353. "The data structure of a tensor is shown in Figure 3-1. A tensor is split into a header (Tensor) and a storage area (Storage). The header holds the tensor's shape (size), stride, data type and similar metadata, while the actual data is stored as a contiguous array. Since the data easily runs into thousands of elements, the header takes little memory; the bulk of the memory footprint is determined by the number of elements, i.e. the size of the storage.\n",
2354. "\n",
2355. "In general every tensor has a corresponding storage; the storage is a convenience interface wrapped around the raw data. Different tensors usually have different headers, but they may share the same underlying data. Let us look at two examples.\n",
2356. "\n",
2357. "![Figure 3-1: the data structure of a Tensor](imgs/tensor_data_structure.svg)"
  2358. ]
  2359. },
  2360. {
  2361. "cell_type": "code",
  2362. "execution_count": 86,
  2363. "metadata": {},
  2364. "outputs": [
  2365. {
  2366. "data": {
  2367. "text/plain": [
  2368. " 0.0\n",
  2369. " 1.0\n",
  2370. " 2.0\n",
  2371. " 3.0\n",
  2372. " 4.0\n",
  2373. " 5.0\n",
  2374. "[torch.FloatStorage of size 6]"
  2375. ]
  2376. },
  2377. "execution_count": 86,
  2378. "metadata": {},
  2379. "output_type": "execute_result"
  2380. }
  2381. ],
  2382. "source": [
  2383. "a = t.arange(0, 6)\n",
  2384. "a.storage()"
  2385. ]
  2386. },
  2387. {
  2388. "cell_type": "code",
  2389. "execution_count": 87,
  2390. "metadata": {},
  2391. "outputs": [
  2392. {
  2393. "data": {
  2394. "text/plain": [
  2395. " 0.0\n",
  2396. " 1.0\n",
  2397. " 2.0\n",
  2398. " 3.0\n",
  2399. " 4.0\n",
  2400. " 5.0\n",
  2401. "[torch.FloatStorage of size 6]"
  2402. ]
  2403. },
  2404. "execution_count": 87,
  2405. "metadata": {},
  2406. "output_type": "execute_result"
  2407. }
  2408. ],
  2409. "source": [
  2410. "b = a.view(2, 3)\n",
  2411. "b.storage()"
  2412. ]
  2413. },
  2414. {
  2415. "cell_type": "code",
  2416. "execution_count": 88,
  2417. "metadata": {},
  2418. "outputs": [
  2419. {
  2420. "data": {
  2421. "text/plain": [
  2422. "True"
  2423. ]
  2424. },
  2425. "execution_count": 88,
  2426. "metadata": {},
  2427. "output_type": "execute_result"
  2428. }
  2429. ],
  2430. "source": [
2431. "# an object's id can be viewed as its address in memory\n",
2432. "# the two storages have the same id, i.e. they are the same storage\n",
  2433. "id(b.storage()) == id(a.storage())"
  2434. ]
  2435. },
  2436. {
  2437. "cell_type": "code",
  2438. "execution_count": 89,
  2439. "metadata": {},
  2440. "outputs": [
  2441. {
  2442. "data": {
  2443. "text/plain": [
  2444. "\n",
  2445. " 0 100 2\n",
  2446. " 3 4 5\n",
  2447. "[torch.FloatTensor of size 2x3]"
  2448. ]
  2449. },
  2450. "execution_count": 89,
  2451. "metadata": {},
  2452. "output_type": "execute_result"
  2453. }
  2454. ],
  2455. "source": [
2456. "# when a changes, b changes as well, because they share the same storage\n",
  2457. "a[1] = 100\n",
  2458. "b"
  2459. ]
  2460. },
  2461. {
  2462. "cell_type": "code",
  2463. "execution_count": 90,
  2464. "metadata": {},
  2465. "outputs": [
  2466. {
  2467. "data": {
  2468. "text/plain": [
  2469. " 0.0\n",
  2470. " 100.0\n",
  2471. " 2.0\n",
  2472. " 3.0\n",
  2473. " 4.0\n",
  2474. " 5.0\n",
  2475. "[torch.FloatStorage of size 6]"
  2476. ]
  2477. },
  2478. "execution_count": 90,
  2479. "metadata": {},
  2480. "output_type": "execute_result"
  2481. }
  2482. ],
  2483. "source": [
  2484. "c = a[2:] \n",
  2485. "c.storage()"
  2486. ]
  2487. },
  2488. {
  2489. "cell_type": "code",
  2490. "execution_count": 91,
  2491. "metadata": {},
  2492. "outputs": [
  2493. {
  2494. "data": {
  2495. "text/plain": [
  2496. "(94139619931688, 94139619931680)"
  2497. ]
  2498. },
  2499. "execution_count": 91,
  2500. "metadata": {},
  2501. "output_type": "execute_result"
  2502. }
  2503. ],
  2504. "source": [
2505. "c.data_ptr(), a.data_ptr() # data_ptr returns the memory address of the tensor's first element\n",
2506. "# the two addresses differ by 8 bytes: c starts two elements later, and each float element takes 4 bytes"
  2507. ]
  2508. },
  2509. {
  2510. "cell_type": "code",
  2511. "execution_count": 92,
  2512. "metadata": {},
  2513. "outputs": [
  2514. {
  2515. "data": {
  2516. "text/plain": [
  2517. "\n",
  2518. " 0\n",
  2519. " 100\n",
  2520. "-100\n",
  2521. " 3\n",
  2522. " 4\n",
  2523. " 5\n",
  2524. "[torch.FloatTensor of size 6]"
  2525. ]
  2526. },
  2527. "execution_count": 92,
  2528. "metadata": {},
  2529. "output_type": "execute_result"
  2530. }
  2531. ],
  2532. "source": [
2533. "c[0] = -100 # c[0] occupies the same memory address as a[2]\n",
  2534. "a"
  2535. ]
  2536. },
  2537. {
  2538. "cell_type": "code",
  2539. "execution_count": 93,
  2540. "metadata": {},
  2541. "outputs": [
  2542. {
  2543. "data": {
  2544. "text/plain": [
  2545. "\n",
  2546. " 6666 100 -100\n",
  2547. " 3 4 5\n",
  2548. "[torch.FloatTensor of size 2x3]"
  2549. ]
  2550. },
  2551. "execution_count": 93,
  2552. "metadata": {},
  2553. "output_type": "execute_result"
  2554. }
  2555. ],
  2556. "source": [
  2557. "d = t.Tensor(c.storage())\n",
  2558. "d[0] = 6666\n",
  2559. "b"
  2560. ]
  2561. },
  2562. {
  2563. "cell_type": "code",
  2564. "execution_count": 94,
  2565. "metadata": {},
  2566. "outputs": [
  2567. {
  2568. "data": {
  2569. "text/plain": [
  2570. "True"
  2571. ]
  2572. },
  2573. "execution_count": 94,
  2574. "metadata": {},
  2575. "output_type": "execute_result"
  2576. }
  2577. ],
  2578. "source": [
2579. "# the following four tensors share the same storage\n",
  2580. "id(a.storage()) == id(b.storage()) == id(c.storage()) == id(d.storage())"
  2581. ]
  2582. },
  2583. {
  2584. "cell_type": "code",
  2585. "execution_count": 95,
  2586. "metadata": {},
  2587. "outputs": [
  2588. {
  2589. "data": {
  2590. "text/plain": [
  2591. "(0, 2, 0)"
  2592. ]
  2593. },
  2594. "execution_count": 95,
  2595. "metadata": {},
  2596. "output_type": "execute_result"
  2597. }
  2598. ],
  2599. "source": [
  2600. "a.storage_offset(), c.storage_offset(), d.storage_offset()"
  2601. ]
  2602. },
  2603. {
  2604. "cell_type": "code",
  2605. "execution_count": 96,
  2606. "metadata": {},
  2607. "outputs": [
  2608. {
  2609. "data": {
  2610. "text/plain": [
  2611. "True"
  2612. ]
  2613. },
  2614. "execution_count": 96,
  2615. "metadata": {},
  2616. "output_type": "execute_result"
  2617. }
  2618. ],
  2619. "source": [
2620. "e = b[::2, ::2] # take every other row and every other column\n",
  2621. "id(e.storage()) == id(a.storage())"
  2622. ]
  2623. },
  2624. {
  2625. "cell_type": "code",
  2626. "execution_count": 97,
  2627. "metadata": {},
  2628. "outputs": [
  2629. {
  2630. "data": {
  2631. "text/plain": [
  2632. "((3, 1), (6, 2))"
  2633. ]
  2634. },
  2635. "execution_count": 97,
  2636. "metadata": {},
  2637. "output_type": "execute_result"
  2638. }
  2639. ],
  2640. "source": [
  2641. "b.stride(), e.stride()"
  2642. ]
  2643. },
  2644. {
  2645. "cell_type": "code",
  2646. "execution_count": 98,
  2647. "metadata": {},
  2648. "outputs": [
  2649. {
  2650. "data": {
  2651. "text/plain": [
  2652. "False"
  2653. ]
  2654. },
  2655. "execution_count": 98,
  2656. "metadata": {},
  2657. "output_type": "execute_result"
  2658. }
  2659. ],
  2660. "source": [
  2661. "e.is_contiguous()"
  2662. ]
  2663. },
  2664. {
  2665. "cell_type": "markdown",
  2666. "metadata": {},
  2667. "source": [
2668. "As we can see, the vast majority of these operations do not touch the tensor's data; they only modify the tensor's header. This saves memory and speeds up processing, but it is something to keep in mind when using them.\n",
2669. "Moreover, some operations leave a tensor non-contiguous; in that case call its `tensor.contiguous` method to make the data contiguous again. This method copies the data, so the result no longer shares storage with the original.\n",
2670. "The reader may also want to think about why, as mentioned earlier, advanced indexing generally does not share storage while plain indexing does (hint: plain indexing can be implemented by changing only the tensor's offset, stride and size, without touching the storage). A small sketch illustrating this follows."
  2671. ]
  2672. },
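{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of that hint (the tensor `x` and the views `s1`/`s2` are introduced only for this demonstration): a plain slice is just a new header over the same storage, whereas mask-based (advanced) indexing copies the selected data into its own storage."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x = t.arange(0, 6).view(2, 3)\n",
"s1 = x[1]      # plain indexing: a view, only offset/stride/size change\n",
"s2 = x[x > 2]  # mask (advanced) indexing: the selected data is copied\n",
"print(s1.storage_offset(), s1.stride())       # offset 3 into x's storage, stride (1,)\n",
"print(s1.data_ptr() == x.data_ptr() + 3 * 4)  # True: 3 elements * 4 bytes further along\n",
"print(s2.data_ptr() == x.data_ptr())          # expected False: s2 owns its own storage"
]
},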
  2673. {
  2674. "cell_type": "markdown",
  2675. "metadata": {},
  2676. "source": [
2677. "### 3.1.4 Other topics on Tensor\n",
2678. "The material here does not fit neatly under the earlier headings, but the author believes it is still worth the reader's attention, so it is collected in this subsection."
  2679. ]
  2680. },
  2681. {
  2682. "cell_type": "markdown",
  2683. "metadata": {},
  2684. "source": [
2685. "#### Persistence\n",
2686. "Saving and loading Tensors is straightforward: t.save and t.load do the job. The `pickle` module to use can be specified when saving/loading, and when loading, a GPU tensor can be mapped to the CPU or to another GPU. A minimal CPU-only sketch comes first, followed by the GPU variants."
  2687. ]
  2688. },
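{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal CPU-only sketch first (the file name `b.pth` is arbitrary); the GPU mappings are shown in the cell after it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x = t.arange(0, 5)\n",
"t.save(x, 'b.pth')   # serialize the tensor to disk\n",
"y = t.load('b.pth')  # load it back as a new tensor\n",
"y"
]
},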
  2689. {
  2690. "cell_type": "code",
  2691. "execution_count": 99,
  2692. "metadata": {
  2693. "scrolled": true
  2694. },
  2695. "outputs": [],
  2696. "source": [
2697. "if t.cuda.is_available():\n",
2698. " a = a.cuda(1) # move a onto GPU 1\n",
2699. " t.save(a,'a.pth')\n",
2700. "\n",
2701. " # load as b, stored on GPU 1 (because the tensor was on GPU 1 when it was saved)\n",
2702. " b = t.load('a.pth')\n",
2703. " # load as c, stored on the CPU\n",
2704. " c = t.load('a.pth', map_location=lambda storage, loc: storage)\n",
2705. " # load as d, stored on GPU 0\n",
2706. " d = t.load('a.pth', map_location={'cuda:1':'cuda:0'})"
  2707. ]
  2708. },
  2709. {
  2710. "cell_type": "markdown",
  2711. "metadata": {},
  2712. "source": [
2713. "#### Vectorization"
  2714. ]
  2715. },
  2716. {
  2717. "cell_type": "markdown",
  2718. "metadata": {},
  2719. "source": [
2720. "Vectorized computation is a special form of parallelism: instead of performing one operation at a time as ordinary programs do, it performs many at once, typically applying the same instruction (or batch of instructions) to different data, i.e. to a whole array/vector. Vectorization can dramatically speed up scientific computation. Python is a high-level language that is convenient to use, but that convenience also means many operations are slow, `for` loops in particular. In scientific code, native Python `for` loops should be avoided as much as possible."
  2721. ]
  2722. },
  2723. {
  2724. "cell_type": "code",
  2725. "execution_count": 100,
  2726. "metadata": {},
  2727. "outputs": [],
  2728. "source": [
  2729. "def for_loop_add(x, y):\n",
  2730. " result = []\n",
  2731. " for i,j in zip(x, y):\n",
  2732. " result.append(i + j)\n",
  2733. " return t.Tensor(result)"
  2734. ]
  2735. },
  2736. {
  2737. "cell_type": "code",
  2738. "execution_count": 101,
  2739. "metadata": {
  2740. "scrolled": true
  2741. },
  2742. "outputs": [
  2743. {
  2744. "name": "stdout",
  2745. "output_type": "stream",
  2746. "text": [
  2747. "222 µs ± 81.9 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n",
  2748. "The slowest run took 11.03 times longer than the fastest. This could mean that an intermediate result is being cached.\n",
  2749. "5.58 µs ± 7.27 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
  2750. ]
  2751. }
  2752. ],
  2753. "source": [
  2754. "x = t.zeros(100)\n",
  2755. "y = t.ones(100)\n",
  2756. "%timeit -n 10 for_loop_add(x, y)\n",
  2757. "%timeit -n 10 x + y"
  2758. ]
  2759. },
  2760. {
  2761. "cell_type": "markdown",
  2762. "metadata": {},
  2763. "source": [
2764. "The two differ in speed by more than a factor of 40, so in practice you should call built-in functions whenever possible: they are implemented in C/C++ underneath and benefit from low-level optimizations. Make vectorized thinking a habit when writing code."
  2765. ]
  2766. },
  2767. {
  2768. "cell_type": "markdown",
  2769. "metadata": {},
  2770. "source": [
2771. "A few more points worth noting:\n",
2772. "- most functions under `t.function` take an `out` parameter; the result is then written into the tensor that out points to.\n",
2773. "- `t.set_num_threads` sets the number of threads PyTorch uses for multi-threaded CPU computation, which can be used to cap how many CPUs PyTorch occupies.\n",
2774. "- `t.set_printoptions` controls the numeric precision and format used when printing tensors.\n",
2775. "Examples follow below; a brief sketch of `t.set_num_threads` comes after them."
  2776. ]
  2777. },
  2778. {
  2779. "cell_type": "code",
  2780. "execution_count": 102,
  2781. "metadata": {},
  2782. "outputs": [
  2783. {
  2784. "name": "stdout",
  2785. "output_type": "stream",
  2786. "text": [
  2787. "16777216.0 16777216.0\n"
  2788. ]
  2789. },
  2790. {
  2791. "data": {
  2792. "text/plain": [
  2793. "(199999, 199998)"
  2794. ]
  2795. },
  2796. "execution_count": 102,
  2797. "metadata": {},
  2798. "output_type": "execute_result"
  2799. }
  2800. ],
  2801. "source": [
  2802. "a = t.arange(0, 20000000)\n",
2803. "print(a[-1], a[-2]) # a is a 32-bit FloatTensor; float32 cannot distinguish consecutive integers beyond 2^24, so the values saturate at 16777216\n",
  2804. "b = t.LongTensor()\n",
2805. "t.arange(0, 200000, out=b) # a 64-bit LongTensor represents these integers exactly, so there is no loss of precision\n",
  2806. "b[-1],b[-2]"
  2807. ]
  2808. },
  2809. {
  2810. "cell_type": "code",
  2811. "execution_count": 103,
  2812. "metadata": {},
  2813. "outputs": [
  2814. {
  2815. "data": {
  2816. "text/plain": [
  2817. "\n",
  2818. "-0.6379 0.5422 0.0413\n",
  2819. " 0.4575 0.8977 2.3465\n",
  2820. "[torch.FloatTensor of size 2x3]"
  2821. ]
  2822. },
  2823. "execution_count": 103,
  2824. "metadata": {},
  2825. "output_type": "execute_result"
  2826. }
  2827. ],
  2828. "source": [
  2829. "a = t.randn(2,3)\n",
  2830. "a"
  2831. ]
  2832. },
  2833. {
  2834. "cell_type": "code",
  2835. "execution_count": 104,
  2836. "metadata": {
  2837. "scrolled": false
  2838. },
  2839. "outputs": [
  2840. {
  2841. "data": {
  2842. "text/plain": [
  2843. "\n",
  2844. "-0.6378980875 0.5421655774 0.0412697867\n",
  2845. "0.4574612975 0.8976946473 2.3464736938\n",
  2846. "[torch.FloatTensor of size 2x3]"
  2847. ]
  2848. },
  2849. "execution_count": 104,
  2850. "metadata": {},
  2851. "output_type": "execute_result"
  2852. }
  2853. ],
  2854. "source": [
  2855. "t.set_printoptions(precision=10)\n",
  2856. "a"
  2857. ]
  2858. },
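{
"cell_type": "markdown",
"metadata": {},
"source": [
"`t.set_num_threads` was not exercised above; the following sketch (the thread count 4 is an arbitrary choice) shows how to cap and query the number of CPU threads PyTorch uses."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"t.set_num_threads(4)  # limit PyTorch to 4 CPU threads for parallel computation\n",
"t.get_num_threads()   # query the current setting"
]
},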
  2859. {
  2860. "cell_type": "markdown",
  2861. "metadata": {},
  2862. "source": [
2863. "### 3.1.5 A first exercise: linear regression"
  2864. ]
  2865. },
  2866. {
  2867. "cell_type": "markdown",
  2868. "metadata": {},
  2869. "source": [
2870. "Linear regression is introductory machine-learning material with a very wide range of applications. It uses regression analysis from statistics to determine the quantitative relationship between two or more mutually dependent variables, and takes the form $y = wx+b+e$, where the error $e$ follows a normal distribution with mean 0. First let us write down the loss function of linear regression:\n",
2871. "$$\n",
2872. "loss = \\sum_{i=1}^N \\frac{1}{2}\\left(y_i-(wx_i+b)\\right)^2\n",
2873. "$$\n",
2874. "We then update the parameters $\\textbf{w}$ and $\\textbf{b}$ with stochastic gradient descent to minimize this loss, and finally obtain the learned values of $\\textbf{w}$ and $\\textbf{b}$. The gradient formulas used by the manual backward pass further below are sketched in the next cell."
  2875. ]
  2876. },
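{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, a short derivation sketch (writing $\\hat y = xw + b$ for the batch of predictions, a notation introduced only in this cell): differentiating the loss above gives\n",
"$$\n",
"\\frac{\\partial loss}{\\partial \\hat y_i} = \\hat y_i - y_i, \\qquad\n",
"\\frac{\\partial loss}{\\partial w} = x^T(\\hat y - y), \\qquad\n",
"\\frac{\\partial loss}{\\partial b} = \\sum_i (\\hat y_i - y_i)\n",
"$$\n",
"which is exactly what the manual backward pass below computes with `x.t().mm(dy_pred)` and `dy_pred.sum()`."
]
},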
  2877. {
  2878. "cell_type": "code",
  2879. "execution_count": 105,
  2880. "metadata": {},
  2881. "outputs": [],
  2882. "source": [
  2883. "import torch as t\n",
  2884. "%matplotlib inline\n",
  2885. "from matplotlib import pyplot as plt\n",
  2886. "from IPython import display"
  2887. ]
  2888. },
  2889. {
  2890. "cell_type": "code",
  2891. "execution_count": 106,
  2892. "metadata": {},
  2893. "outputs": [],
  2894. "source": [
2895. "# fix the random seed so the output below is reproducible across machines\n",
  2896. "t.manual_seed(1000) \n",
  2897. "\n",
  2898. "def get_fake_data(batch_size=8):\n",
2899. " ''' generate random data: y = x*2 + 3, plus some noise'''\n",
  2900. " x = t.rand(batch_size, 1) * 20\n",
  2901. " y = x * 2 + (1 + t.randn(batch_size, 1))*3\n",
  2902. " return x, y"
  2903. ]
  2904. },
  2905. {
  2906. "cell_type": "code",
  2907. "execution_count": 107,
  2908. "metadata": {},
  2909. "outputs": [
  2910. {
  2911. "data": {
  2912. "text/plain": [
  2913. "<matplotlib.collections.PathCollection at 0x7f7b4a4115c0>"
  2914. ]
  2915. },
  2916. "execution_count": 107,
  2917. "metadata": {},
  2918. "output_type": "execute_result"
  2919. },
  2920. {
  2921. "data": {
  2922. "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD8CAYAAABn919SAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAD11JREFUeJzt3V+MXGd9xvHvU8eU5U+1gWxQvEAN\nKHKpSLHpKkobKaJA64AQMVFRSVtktbShEqhQkEVML4CLKkHmj6peRAokTS5oVArGQS3FWCFtWqmk\n3eAQO3XdFMqfrN14KSzQsqKO+fVix2Bv1t6Z9c7OzLvfj7SamXfP6DxaK0/mvOedc1JVSJJG308N\nOoAkaXVY6JLUCAtdkhphoUtSIyx0SWqEhS5JjbDQJakRFrokNcJCl6RGXLSWO7vkkktq8+bNa7lL\nSRp5Dz744LeqamK57da00Ddv3sz09PRa7lKSRl6Sr3eznVMuktQIC12SGmGhS1Ijli30JE9N8s9J\nvpzkkSTv74y/IMkDSR5N8pdJntL/uJKkc+nmE/oPgVdU1UuBrcC1Sa4CPgB8pKouB74DvLl/MSVJ\ny1l2lUst3AHjfzovN3Z+CngF8Jud8buA9wG3rn5ESRpN+w7OsGf/UY7NzbNpfIxd27ewY9tk3/bX\n1Rx6kg1JHgJOAAeArwBzVfVEZ5PHgP6llKQRs+/gDLv3HmJmbp4CZubm2b33EPsOzvRtn10VelWd\nqqqtwHOBK4EXL7XZUu9NcmOS6STTs7OzK08qSSNkz/6jzJ88ddbY/MlT7Nl/tG/77GmVS1XNAX8H\nXAWMJzk9ZfNc4Ng53nNbVU1V1dTExLJfdJKkJhybm+9pfDV0s8plIsl45/kY8CrgCHAf8OudzXYC\n9/QrpCSNmk3jYz2Nr4ZuPqFfBtyX5GHgX4ADVfXXwLuBdyb5D+DZwO19SylJI2bX9i2Mbdxw1tjY\nxg3s2r6lb/vsZpXLw8C2Jca/ysJ8uiRpkdOrWdZylcuaXpxLktaTHdsm+1rgi/nVf0lqhIUuSY2w\n0CWpERa6JDXCQpekRljoktQIC12SGmGhS1IjLHRJaoSFLkmNsNAlqREWuiQ1wkKXpEZY6JLUCAtd\nkhphoUtSIyx0SWqEhS5JjbDQJakRFrokNcJCl6RGWOiS1AgLXZIaYaFLUiMsdElqhIUuSY2w0CWp\nERa6JDXCQpekRljoktQIC12SGmGhS1IjLHRJaoSFLkmNsNAlqRHLFnqS5yW5L8mRJI8keXtn/H1J\nZpI81Pl5Tf/jSpLO5aIutnkCeFdVfSnJM4EHkxzo/O4jVfXB/sWTJHVr2UKvquPA8c7z7yc5Akz2\nO5gkqTc9zaEn2QxsAx7oDL0tycNJ7khy8SpnkyT1oOtCT/IM4FPAO6rqe8CtwIuArSx8gv/QOd53\nY5LpJNOzs7OrEFmStJSuCj3JRhbK/ONVtRegqh6vqlNV9SPgo8CVS723qm6rqqmqmpqYmFit3JKk\nRbpZ5RLgduBIVX34jPHLztjs9cDh1Y8nSepWN6tcrgbeBBxK8lBn7D3ADUm2AgV8DXhLXxJKkrrS\nzSqXfwSyxK8+u/pxJEkr5TdFJakRFrokNcJCl6RGdHNSVGrSvoMz7Nl/lGNz82waH2PX9i3s2OaX\noDW6LHStS/sOzrB77yHmT54CYGZunt17DwFY6hpZTrloXdqz/+iPy/y0+ZOn2LP/6IASSRfOQte6\ndGxuvqdxaRRY6FqXNo2P9TQujQILXevSru1bGNu44ayxsY0b2LV9y4ASSRfOk6Jal06f+HSVi1pi\noWvd2rFt0gJXU5xykaRGWOiS1AgLXZIaYaFLUiMsdElqhKtcJKlHw3phNwtdknowzBd2c8pFknow\nzBd2s9AlqQfDfGE3C12SejDMF3az0CWpB8N8YTdPikpSD4b5wm4WuiT1aFgv7OaUiyQ1wkKXpEZY\n6JLUCAtdkhphoUtSIyx0SWqEhS5JjbDQJakRFrokNcJCl6RGWOiS1AgLXZIasWyhJ3lekvuSHEny\nSJK3d8afleRAkkc7jxf3P64k6Vy6+YT+BPCuqnoxcBXw1iQ/D9wE3FtVlwP3dl5rBO07OMPVt3yB\nF9z0N1x9yxfYd3Bm0JEkrcCyhV5Vx6vqS53n3weOAJPAdcBdnc3uAnb0K6T65/QNb2fm5il+csNb\nS10aPT3NoSfZDGwDHgCeU1XHYaH0gUtXO5z6b5hveCupN10XepJnAJ8C3lFV3+vhfTcmmU4yPTs7\nu5KM6qNhvuGtpN50VehJNrJQ5h+vqr2d4ceTXNb5/WXAiaXeW1W3VdVUVU1NTEysRmatomG+4a2k\n3nSzyiXA7cCRqvrwGb/6DLCz83wncM/qx1O/DfMNbyX1ppt7il4NvAk4lOShzth7gFuATyR5M/AN\n4A39iah+GuYb3krqTapqzXY2NTVV09PTa7Y/SWpBkgeramq57fymqCQ1wkKXpEZY6JLUCAtdkhph\noUtSI7pZtqhVsu/gjMsDJfWNhb5GTl8E6/R1U05fBAuw1CWtCgt9jZzvIlgW+uB41KSWWOhrxItg\nDR+PmtQaT4quES+CNXy8dLBaY6GvES+CNXw8alJrLPQ1smPbJDdffwWT42MEmBwf4+brr/DQfoA8\nalJrnENfQzu2TVrgQ2TX9i1nzaGDR00abRa61i0vHazWWOha1zxqUkucQ5ekRljoktQIC12SGmGh\nS1IjLHRJaoSFLkmNsNAlqREWuiQ1wkKXpEZY6JLUCAtdkhphoUtSIyx0SWqEhS5JjbDQJakRFrok\nNWIkbnCx7+CMd5WRpGUMfaHvOzhz1n0fZ+bm2b33EIClLklnGPoplz37j551E1+A+ZOn2LP/6IAS\nSdJwGvpCPzY339O4JK1XQ1/om8bHehqXpPVq2UJPckeSE0kOnzH2viQzSR7q/LymXwF3bd/C2MYN\nZ42NbdzAru1b+rVLSRpJ3XxCvxO4donxj1TV1s7PZ1c31k/s2DbJzddfweT4GAEmx8e4+forPCEq\nSYssu8qlqu5Psrn/Uc5tx7ZJC1ySlnEhc+hvS/JwZ0rm4lVLJElakZUW+q3Ai4CtwHHgQ+faMMmN\nSaaTTM/Ozq5wd5Kk5ayo0Kvq8ao6VVU/Aj4KXHmebW+rqqmqmpqYmFhpTknSMlZU6EkuO+Pl64HD\n59pWkrQ2lj0pmuRu4OXAJUkeA94LvDzJVqCArwFv6WNGSVIXulnlcsMSw7f3IYsk6QIM/TdFJUnd\nsdAlqREWuiQ1wkKXpEZY6JLUCAtdkhphoUtSIyx0SWqEhS5JjbDQJakRFrokNcJCl6RGWOiS1AgL\nXZIaYaFLUiMsdElqhIUuSY2w0CWpERa6JDXCQpekRljoktQIC12SGmGhS1IjLHRJaoSFLkmNsNAl\nqREWuiQ1wkKXpEZY6JLUCAt
dkhphoUtSIyx0SWqEhS5JjbDQJakRFrokNcJCl6RGLFvoSe5IciLJ\n4TPGnpXkQJJHO48X9zemJGk53XxCvxO4dtHYTcC9VXU5cG/ntSRpgJYt9Kq6H/j2ouHrgLs6z+8C\ndqxyLklSj1Y6h/6cqjoO0Hm8dPUiSZJWou8nRZPcmGQ6yfTs7Gy/dydJ69ZKC/3xJJcBdB5PnGvD\nqrqtqqaqampiYmKFu5MkLWelhf4ZYGfn+U7gntWJI0laqW6WLd4N/BOwJcljSd4M3AL8apJHgV/t\nvJYkDdBFy21QVTec41evXOUskqQL4DdFJakRFrokNcJCl6RGWOiS1AgLXZIaYaFLUiMsdElqhIUu\nSY2w0CWpERa6JDXCQpekRix7LZdRs+/gDHv2H+XY3DybxsfYtX0LO7ZNDjqWJPVdU4W+7+AMu/ce\nYv7kKQBm5ubZvfcQgKUuqXlNTbns2X/0x2V+2vzJU+zZf3RAiSRp7TRV6Mfm5nsal6SWNFXom8bH\nehqXpJY0Vei7tm9hbOOGs8bGNm5g1/YtA0okSWunqZOip098uspF0nrUVKHDQqlb4JLWo6amXCRp\nPbPQJakRFrokNcJCl6RGWOiS1IhU1drtLJkFvr7MZpcA31qDOBfCjKtnFHKacXWMQkYYzpw/W1UT\ny220poXejSTTVTU16BznY8bVMwo5zbg6RiEjjE7OpTjlIkmNsNAlqRHDWOi3DTpAF8y4ekYhpxlX\nxyhkhNHJ+SRDN4cuSVqZYfyELklagaEq9CRfS3IoyUNJpgedZylJxpN8Msm/JTmS5JcGnelMSbZ0\n/n6nf76X5B2DzrVYkj9K8kiSw0nuTvLUQWdaLMnbO/keGaa/YZI7kpxIcviMsWclOZDk0c7jxUOY\n8Q2dv+WPkgx8Fck5Mu7p/Lf9cJJPJxkfZMZeDVWhd/xKVW0d4mVDfwp8rqp+DngpcGTAec5SVUc7\nf7+twC8CPwA+PeBYZ0kyCfwhMFVVLwE2AG8cbKqzJXkJ8PvAlSz8O782yeWDTfVjdwLXLhq7Cbi3\nqi4H7u28HqQ7eXLGw8D1wP1rnmZpd/LkjAeAl1TVLwD/Duxe61AXYhgLfWgl+RngGuB2gKr6v6qa\nG2yq83ol8JWqWu7LXINwETCW5CLgacCxAedZ7MXAF6vqB1X1BPD3wOsHnAmAqrof+Pai4euAuzrP\n7wJ2rGmoRZbKWFVHqmpobvB7joyf7/x7A3wReO6aB7sAw1boBXw+yYNJbhx0mCW8EJgF/jzJwSQf\nS/L0QYc6jzcCdw86xGJVNQN8EPgGcBz4blV9frCpnuQwcE2SZyd5GvAa4HkDznQ+z6mq4wCdx0sH\nnKcFvwv87aBD9GLYCv3qqnoZ8GrgrUmuGXSgRS4CXgbcWlXbgP9l8Ie2S0ryFOB1wF8NOstinfnd\n64AXAJuApyf57cGmOltVHQE+wMIh+OeALwNPnPdNakaSP2bh3/vjg87Si6Eq9Ko61nk8wcK875WD\nTfQkjwGPVdUDndefZKHgh9GrgS9V1eODDrKEVwH/WVWzVXUS2Av88oAzPUlV3V5VL6uqa1g4NH90\n0JnO4/EklwF0Hk8MOM/ISrITeC3wWzVi67qHptCTPD3JM08/B36NhcPeoVFV/wV8M8npu06/EvjX\nAUY6nxsYwumWjm8AVyV5WpKw8HccqpPLAEku7Tw+n4WTecP69wT4DLCz83wncM8As4ysJNcC7wZe\nV1U/GHSeXg3NF4uSvJCfrMa4CPiLqvqTAUZaUpKtwMeApwBfBX6nqr4z2FRn68z5fhN4YVV9d9B5\nlpLk/cBvsHBYexD4var64WBTnS3JPwDPBk4C76yqewccCYAkdwMvZ+GqgI8D7wX2AZ8Ans/C/zDf\nUFWLT5wOOuO3gT8DJoA54KGq2j5kGXcDPw38d2ezL1bVHwwk4AoMTaFLki7M0Ey5SJIujIUuSY2w\n0CWpERa6JDXCQpekRljoktQIC12SGmGhS1Ij/h/CJYJPfXoR0gAAAABJRU5ErkJggg==\n",
  2923. "text/plain": [
  2924. "<Figure size 432x288 with 1 Axes>"
  2925. ]
  2926. },
  2927. "metadata": {},
  2928. "output_type": "display_data"
  2929. }
  2930. ],
  2931. "source": [
2932. "# take a look at the generated x-y distribution\n",
  2933. "x, y = get_fake_data()\n",
  2934. "plt.scatter(x.squeeze().numpy(), y.squeeze().numpy())"
  2935. ]
  2936. },
  2937. {
  2938. "cell_type": "code",
  2939. "execution_count": 108,
  2940. "metadata": {
  2941. "scrolled": false
  2942. },
  2943. "outputs": [
  2944. {
  2945. "data": {
  2946. "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAD8CAYAAAB0IB+mAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xd4VGXax/HvDQQINVSBQKQaQEDA\nCCgWVBTsiNvsroX1XX13fXcXKWJZK4p1d11d7O66lpUAFhSxgV1BMKGFjhBCJ9RA2vP+MRM3hEky\nyfSZ3+e6uDJz5kzm9nhyz5nnPPM75pxDRETiX51IFyAiIuGhhi8ikiDU8EVEEoQavohIglDDFxFJ\nEGr4IiIJQg1fRCRBqOGLiCQINXwRkQRRL5wv1rp1a9e5c+dwvqSISMjkHygiN7+A0gqJBY3q1+Xo\nVo2pV8cOW3fznoMUlZSSVLcO7Zo1JKVRkl+vs2DBgu3OuTaB1hvWht+5c2fmz58fzpcUEQmZoZM/\npji/4IjlqSnJfDH+jJ/uz1iYy4TMbFoXlfy0LCmpLpNG92XUgNRqX8fM1gejXr+HdMysrpktNLN3\nvPe7mNk3ZrbSzF43s/rBKEhEJFbk+mj2AJsqLJ8yO4eCcs0eoKCohCmzc0JWmy81GcP/PbCs3P0H\ngceccz2AXcB1wSxMRCRabd17kJte+b7SxzukJB92v+IbQHXLQ8Wvhm9mHYHzgGe99w04A3jTu8pL\nwKhQFCgiEi2cc7zx3QaGPzKXOcu2cF7f9jSsd3gbTU6qy9gR6Yctq/gGUN3yUPH3CP9x4Fag1Hu/\nFZDvnCv23t8IVD8QJSISo9Zt389lz3zDrdOy6NW+Ge///hSevHwgky/pR2pKMoZn7P4BH+PyY0ek\nk5xU97Blvt4YQq3ak7Zmdj6w1Tm3wMyGlS32sarPYH0zGwOMAUhLS6tlmSIikVFUUsqzn63l8Q9X\nUL9eHR4Y3ZdfZnSijncGzqgBqdWeeC17fMrsHDblF9AhJZmxI9L9OmEbTP7M0hkKXGhm5wINgWZ4\njvhTzKye9yi/I7DJ15Odc1OBqQAZGRm62oqIxIzsjbsZNy2LpXl7OKdPO/584bG0bdbwsHVmLMz1\nq5H788YQatU2fOfcBGACgPcI/0/OucvN7D/Az4DXgKuBmSGsU0QkbA4UFvPYnBU89/laWjdpwNNX\nHM/IPu2OWK9sumXZDJzc/AImZGYDRLy5+xLIPPxxwGtmdi+wEHguOCWJiETOZyu3MXF6Nht2FnDp\noDTGn9OT5sm+vyBV1XTLmG/4zrlPgU+9t9cAg4JfkohI+O3aX8g97y4l8/tcurZuzOtjhjC4a6sq\nnxMt0y39FdZv2oqIRBvnHG/9sIm7317K7oIi/veM7tx0encaVphV40uHlGSfX74K93RLf6nhi0jC\nys0vYNL0bD7J2cZxnVJ45ZK+9GzXzO/njx2RftgYPkRmuqW/1PBFJOGUlDr++dU6HvJGG9xxfm+u\nPqkzdev4mnFeuWiZbukvNXwRSSg5m/cybloWizbkMyy9DfeO6kPHFo1q/fuiYbqlv9TwRSQk/J2f\nHi4Hi0r4+yereGruapo2TOKJX/XnwuM64EmKSQxq+CISdNE2P/3btTsZn5nFmm37GT0glUnn96Zl\n48QL+FXDF5Ggi5b56XsOFvHge8t55Zsf6dgimZevHcSpxwR8HZGYpYYvIkEXDfPTP1iymdtnLmbb\n3kNcf3IX/nD2MTSqn9gtL7H/60UkJCI5P33rnoPc9fYSZmVvpme7pky9MoPjOqWE/HVjgRq+iARd\nJOanO+d4/bsN3DdrGYeKSxk7Ip0xp3YlqW5NrvMU39TwRSTowj0/fe32/UzIzOLrNTsZ3KUlD4zu\nS9c2TULyWrFMDV9EQiIc89OLSkp55rM1PP7hShrUq8Pk0X35RbmsejmcGr6IxKSsjfmMm5bNsrw9\nnNu3HXddcGRWvRxODV9EYsqBwmIe/WAFz3+xljZNG/CPK49nxLFHZtXLkdTwRSRmzFvhyarfuKuA\nywenMe6cnjRr6DurXo6khi8iEVGT6IWd+wu5952lZC7MpWubxrzxmxMZ1KVlmCuOfWr4IhJ2/kYv\nlGXV//ntpewpKOJ3Z3Tnt35m1cuR1PBFJOz8iV7YuOsAk2Ys5tOcbfTvlMLkGmbVy5Gqbfhm1hCY\nBzTwrv+mc+5OM3sROA3Y7V31GufcolAVKiLxo6rohZJSx8tfrWOKN6v+zgt6c9WJNc+qlyP5c4R/\nCDjDObfPzJKAz83sPe9jY51zb4auPBGJR5VFL7Rp2oDRT33JD0HKqpfDVfudY+exz3s3yfvPhbQq\nEYlrY0ekk1xhHL5eHWP7vkNs2HmAJ37VnxeuOUHNPsj8Cpkws7pmtgjYCsxxzn3jfeg+M8sys8fM\nrEElzx1jZvPNbP62bduCVLaIxLJRA1J5YHRfUr1havXqGMWljlEDUvnwD6dxUf/UhLowSbj41fCd\ncyXOuf5AR2CQmfUBJgA9gROAlsC4Sp471TmX4ZzLaNMmcXOoReRwZ/Rqy2npnp7QrnlDXr52EI/+\non9CXpgkXGo0S8c5l29mnwIjnXMPexcfMrMXgD8FuzgRiU+zl2zmDm9W/Q2ndOH/zlJWfTj4M0un\nDVDkbfbJwHDgQTNr75zLM8/nrlHA4hDXKiIxbsueg9w5cwnvL9lMr/bNeOaqDPp1VFZ9uPjzltoe\neMnM6uIZAnrDOfeOmX3sfTMwYBFwYwjrFJEYVlrqeH3+Bu6ftYzC4lLGjezJ9ad0UVZ9mFXb8J1z\nWcAAH8vPCElFIhJX1mzbx4TMbL5Zu5MhXVvywOh+dGndONJlJSQNmolISBSVlDJ13hqe+GglDevV\n4cFLPFn1mn0TOWr4IhJ0P2zIZ9y0LJZv3st5fdtz54W9adtUWfWRpoYvIkFzoLCYRz5YwQtfrKVt\n04ZMvfJ4zlZWfdRQwxeRoJi7Yhu3ebPqrxiSxq0jlVUfbdTwRSQg5bPqu7VpzH9uPJETOiurPhqp\n4YtIrTjnmLloE3e/s5S9B4v43Zk9uOn0bjSoF9qs+ppcOEUOp4YvIjW2YecBbpuxmHkrtjEgLYXJ\no/uR3q5pyF/X3wuniG9q+CLit5JSx4tfruPh2TnUMfjzhcdyxZCjj8iqD9VRuD8XTpHKqeGLiF+W\n5e1h/LQsfti4m9PT23Dvxf9NuywvlEfhVV04Raqnhi8iVTpYVMLfPl7F03NX0zw5ib9cOoAL+rWv\n9AtUoTwKr+zCKR18vPHIkdTwRaRSX6/ZwcTMbNZs388lAzsy6bxetKgmvrg2R+H+DgGNHZF+2KcH\ngOSkuowdke7nf1FiU8MXkSPsLihi8nvLefXbH+nUMpl/XjeIU3r4dz2Lmh6F12QIqOy+ZunUjhq+\niBzm/cWerPrt+w4x5tSu3DK
8R42y6mt6FF7TIaBRA1LV4GtJDV9EgMOz6nu3b8ZzV59A347Na/x7\nanoUrhOx4aOGL5LgSksdr323gQfe82TV3zoynRtO6RpQVn1NjsJ1IjZ81PBFEthqb1b9t2t3cmLX\nVjwwui+dw5xVrxOx4aOGL5KACotLmTpvNX/5eBUN69XhoUv68fOMjjXKqg/Wl6t0IjZ8/LmmbUNg\nHtDAu/6bzrk7zawL8BrQEvgeuNI5VxjKYkUkcIs25DO+LKu+X3vuvKDmWfXB/nKVTsSGhz+DdIeA\nM5xzxwH9gZFmNgR4EHjMOdcD2AVcF7oyRSRQ+w8Vc/fbS7n471+Qf6CIZ67K4MnLBtbqwiRVzayR\n6OXPNW0dsM97N8n7zwFnAJd5l78E3AU8FfwSRSRQn+Zs5bbpi8nNL+DKIUdz68h0mgaQVa+ZNbHJ\nrzF8M6sLLAC6A08Cq4F851yxd5WNgD6PiUSZHfsOcc87S5mxaBPd2zbhzRtPJCMIWfWaWROb/Jp3\n5Zwrcc71BzoCg4Bevlbz9VwzG2Nm881s/rZt22pfqYj4zTnH9IUbGf7oXN7NzuP3Z/bg3d+dHJRm\nD56ZNclJh+fea2ZN9KvRLB3nXL6ZfQoMAVLMrJ73KL8jsKmS50wFpgJkZGT4fFMQkeApn1U/MC2F\nyZf045ijgptVr5k1scmfWTptgCJvs08GhuM5YfsJ8DM8M3WuBmaGslARqVpJqeOFL9byyAcrqGNw\n90XHcsXgo6lTx/+pljWhmTWxx58j/PbAS95x/DrAG865d8xsKfCamd0LLASeC2GdIlKFpZv2MCHT\nk1V/Zs+23DOqj8bT5Qj+zNLJAgb4WL4Gz3i+iETIwaIS/vLRSqbOW0NKo+qz6iWx6Zu2IjHq6zU7\nmJCZzdrt+/n58R257bxepDSqOqteEpsavkgIheLarp6s+mW8+u0G0lo24l/XDebkHq2DVLHEMzV8\nkRAJxbVd31+cx+0zl7Bj3yF+c2pXbhl+DMn161b/RBHU8EVCJpjXdt28+yB3zFzMB0u3cGyHZrxw\nzQn0Sa15Vr0kNjV8kRAJRvxAaanj1e9+ZPKs5RSWlDLhnJ5cd3IX6gWQVS+JSw1fJEQCjR9YvW0f\nE6Zl8+26nZzUrRX3Xxz+rHqJLzpMEAmR2sYPFBaX8rePV3LO45+Rs2UvD/2sH69cP1jNXgKmI3yR\nEKlN/MDCH3cxflo2OVtqn1UvUhk1fJEQ8jd+YP+hYh7+IIcXv1xHu2YNefaqDIb3PioMFUoiUcMX\nibBPcrYyafpiNu0u4IrBgWfVi1RGDV8kQnxl1R9/dHDii0V8UcMXCTNPVn0u97yzlH2HirlleA/+\nZ1g3GtTTF6gktNTwRcJow84DTJyezWcrtzMwLYUHL+lHjyBn1YtURg1fJAyKS0p58ct1P2XV33PR\nsVwewqx6EV/U8EVCbOmmPYzPzCJLWfUSYWr4IiFSllX/j3lraNEoib9dNoDz+iqrXiJHDV8kBL5a\nvYOJ05VVL9FFDV8kiHYfKOKB95bx2neerPpXrh/M0O7Kqpfo4M9FzDsBLwPtgFJgqnPuCTO7C7gB\n2OZddaJzblaoChWJZs453l+8mTveWsLO/YX85rSu3HKmsuoluvhzhF8M/NE5972ZNQUWmNkc72OP\nOeceDl15ItFPWfUSK/y5iHkekOe9vdfMlgGBXaNNJA6Uljr+/e2PPPieJ6t+/Dk9uV5Z9RLFajSG\nb2adgQHAN8BQ4GYzuwqYj+dTwC4fzxkDjAFIS0sLsFyR6LBq6z4mZGbx3bpdDO3uyao/upXiiyW6\nmXPOvxXNmgBzgfucc5lmdhSwHXDAPUB759y1Vf2OjIwMN3/+/ABLFomcwuJS/jF3NX/9eBXJ9esy\n6bxe/Oz4jppqKSFlZguccxmB/h6/jvDNLAmYBrzinMsEcM5tKff4M8A7gRYjEs2+/3EXE7xZ9ef3\na8+dFxxLm6YNIl2WiN/8maVjwHPAMufco+WWt/eO7wNcDCwOTYkikbXvUDEPz87hpa+UVS+xzZ8j\n/KHAlUC2mS3yLpsIXGpm/fEM6awDfhOSCkUi6JPlW5k0w5NVf9WQo/nTCGXVS+zyZ5bO54CvAUrN\nuZe4tX3fIe5+eylv/aCseokf+qatSDnOOTK/z+Wed5eyX1n1EmfU8EW8lFUv8U4NXxKesuolUajh\nS0Jbsmk346dlk52rrHqJf2r4kpAOFpXw+IcreeYzT1b9Xy8dwPn9lFUv8U0NXxLOl6u3MzEzm3U7\nDiirXhKKGr7EpRkLc5kyO4dN+QV0SElm7Ih0Tk9vy/2zlvH6fGXVS2JSw5e4M2NhLhMysykoKgEg\nN7+AW9/MokFSHQ4UliirXhKWGr7EnSmzc35q9mUKS0pxOGbeNFRZ9ZKwFNwtcWdTfoHP5UUlTs1e\nEpoavsSdyhIsUzXdUhKchnQkbhQWl/L03NXs2F94xGPJSXUZOyI9AlWJRA81fIkLC9bvYkJmFiu2\n7OOC4zowqHMLnp675rBZOqMG6MqcktjU8CWmVcyqf+7qDM7s5cmqv/LEzhGtTSTaqOFLzPp4+RYm\nTV9M3p6DXDnkaG4d2ZMmDbRLi1RGfx0Sc8pn1fdo24Q3bzyJ449uEemyRKKeGr7EDOcc077P5V5v\nVv3/DT+GG4d1VVa9iJ/8uaZtJ+BloB1QCkx1zj1hZi2B14HOeC5x+Avn3K7QlSqJ7Mcdnqz6z1dt\n5/ijWzB5dF9l1YvUkD9H+MXAH51z35tZU2CBmc0BrgE+cs5NNrPxwHhgXOhKlURUXFLK81+s5dE5\nK6hXp46y6kUC4M81bfOAPO/tvWa2DEgFLgKGeVd7CfgUNXwJosW5uxmfmcXi3D0M7+XJqm/fXF+e\nEqmtGo3hm1lnYADwDXCU980A51yembUNenWSkA7Pqq/Pk5cN5Ny+7ZRVLxIgvxu+mTUBpgG3OOf2\n+PvHZ2ZjgDEAaWlptalREsiXq7YzYXo263cc4JcZnZh4bi+aN0qKdFkiccGvhm9mSXia/SvOuUzv\n4i1m1t57dN8e2Orruc65qcBUgIyMDBeEmiUO7T5QxH2zlvLG/I0c3aoR/75+MCcpq14kqPyZpWPA\nc8Ay59yj5R56C7gamOz9OTMkFUpcc87xbnYed721lF0HCrnxtG7cMrwHDZM01VIk2Pw5wh8KXAlk\nm9ki77KJeBr9G2Z2HfAj8PPQlCjxKm93AbfPWMyHy7bSJ7UZL/76BMUXi4SQP7N0PgcqG7A/M7jl\nSCIoLXW88s16Hnw/h+LSUiae25Nrh3ahXl2ldYuEkr5pK2G1cstexmdms2D9Lk7u3pr7L+5LWqtG\nkS5LJCGo4UtYHCou4alPV/P3T1bTqEFdHvn5cYwemKqpliJhpIYvIbdg/S7GT8ti5dZ9XHhcB+64\noDetm/i+KpWIhI4avoTMvkPFTHl/OS9/vZ72zRry/DUZnNHzqEiXJZKw1PAlJD5atoVJ
Mxazec9B\nrj6xM38aka6sepEI01+gBNW2vYf489tLeCcrj2OOasKTl5/EwDRl1YtEAzV8CQrnHG8u2Mi97y6j\noLCEP5x1DDee1o369TTVUiRaqOFLwNbv2M/E6dl8sWoHGUe3YPIlfeneVln1ItFGDV9q7Yis+lF9\nuHxQmrLqRaKUGr7UyuFZ9Udxz6hjlVUvEuXU8KVGCgpLePyjFTz72Vpl1YvEGDV88dsXq7YzITOb\nH3cqq14kFqnhS7XyDxRy37vL+M+CjXRu1Yh/3zCYk7opq14k1qjhS6X+m1W/hF0HivifYd34/ZnK\nqheJVWr44tOm/ALumOnJqu+b2pyXrh3EsR2UVS8Sy9Tw5TClpY5/fbOeB99bTolzTDqvF9ec1FlZ\n9SJxQA1fflI+q/6UHq25b5Sy6kXiiRq+cKi4hL9/spq/f7qKxg3qKateJE75cxHz54Hzga3OuT7e\nZXcBNwDbvKtNdM7NClWREjoL1u9k3LRsVm3dx0X9O3D7+cqqF4lX/hzhvwj8DXi5wvLHnHMPB70i\nCYu9B4uYMjuHf369ng7Nk3nhmhM4vWfbSJclIiHkz0XM55lZ59CXIuHy4VJPVv2WvQe55qTO/Ons\ndBorq14k7gXyV36zmV0FzAf+6Jzb5WslMxsDjAFIS0sL4OUkUNv2HuKut5fwblYe6Uc15e9XDFRW\nvUgCqe1cu6eAbkB/IA94pLIVnXNTnXMZzrmMNm3a1PLlJBDOOd6Yv4Hhj85lzpIt/PGsY3j7f09W\nsxdJMLU6wnfObSm7bWbPAO8ErSIJqvU79jMhM5svV+9gUOeW3D+6L93bNol0WSISAbVq+GbW3jmX\n5717MbA4eCVJMBSXlPLs52t5bM4K6tetw30X9+HSE5RVL5LI/JmW+SowDGhtZhuBO4FhZtYfcMA6\n4DchrFFqaHHubsZNy2LJpj2c1fso7rmoD+2aN4x0WSISYf7M0rnUx+LnQlCLBKigsITHP1zBs5+v\npWXj+jx1+UBG9lFWvYh4aC5enCifVX/poE6MH6msehE5nBp+jMs/UMi97y7jzQUb6dK6Ma/eMIQT\nu7WKdFkiEoXU8GOUc463s/K4+21PVv1vh3Xjd8qqF5EqqOHHoE35Bdw+YzEfLd9Kv47NefnawfTu\n0CzSZYlIlFPDjyElpY5/fb2eh95fTqlDWfUiUiNq+DFixZa9jJ+Wxfc/5nNKj9bcf3FfOrVUVr2I\n+E8NP8odKi7hyU9W89Snq2jSoB6P/fI4RvVXVr2I1JwafhjMWJjLlNk5bMovoENKMmNHpDNqQGq1\nz5u/bifjMz1Z9aO8WfWtlFUvIrWkhh9iMxbmMiEzm4KiEgBy8wuYkJkN4LPpz1iYy4PvLydv90EA\nWjRK4oVfn8Dp6cqqF5HAqOGH2JTZOT81+zIFRSVMmZ1zRMOfsTCXW9/MorCk9L/rFpaw+0BRta9T\n208RIpI4NL0jxDblF/i1fOveg0zIzD6s2QMcLC5lyuycKl+j7FNEbn4Bjv9+ipixMDeg2kUkvqjh\nh1iHlOQqlzvneOO7DQx/ZO4RnwTKVPamUaaqTxEiImXU8ENs7Ih0kn18+3X/oWKembeGy575hlun\nZdGzfTPaNvV9QrayN40y/n6KEJHEpoYfYqMGpPLA6L60qBBkll9QxH2zlrFwwy7uv7gvr90whInn\n9jrizSE5qS5jR6RX+RrVfYoQEQE1/LAYNSCVRvV9nx9PSa7PZYM9FyYpe3NITUnGgNSUZB4Y3bfa\nk6++PkX480YhIolFs3TCJLeS4ZUtew4edn/UgNQaz64pW782s3Q0u0ckcajhh8FnK7dRt45RUuqO\neCxYwy61eaOo6XcERCS2VTukY2bPm9lWM1tcbllLM5tjZiu9P1uEtszYtGt/IX984weufO5bWjaq\nT/0KIWeRHnbR7B6RxOLPGP6LwMgKy8YDHznnegAfee/HlRkLcxk6+WO6jH+XoZM/rtGcduccb/2w\nieGPzmXmolxuOr0bn407nYd+1q/G4/OhpNk9IonFn2vazjOzzhUWX4TnwuYALwGfAuOCWFdEBTLU\nkZtfwKTp2XySs43jOjbnX9cPplf7Zj89N5qGSjqkJPs8t6DZPSLxqbazdI5yzuUBeH/GVdBLbYY6\nSkodL36xlrMencvXa3Zy+/m9yfzt0J+afTTS7B6RxBLyk7ZmNgYYA5CWlhbqlwuKmg515Gzey7hp\nWSzakM+px7ThvlF9YiKrPpDZPSISe2rb8LeYWXvnXJ6ZtQe2Vraic24qMBUgIyPjyGkqUcjfoY5D\nxSU8+fEqnpq7Omaz6qNtmElEQqe2QzpvAVd7b18NzAxOOdHBn6GO79bt5NwnPuMvH6/i/H4d+PAP\np3HxgI4x1exFJLFUe4RvZq/iOUHb2sw2AncCk4E3zOw64Efg56EsMtyqGurYe7CIB99fzr++/pHU\nlGRe/PUJDFNWvYjEAHMufKMsGRkZbv78+WF7vWD7YMlm7pi5hK17D3LNSV3449nH0LiBvrsmIqFl\nZguccxmB/h51Kz9s3XuQu95awqzszfRs15Snrzye/p1SIl2WiEiNqOFXwTnHG/M3cN+7yzhYXMrY\nEemMObUrSXWVOScisUcNvxJrt+9nYmY2X63ZwaAuLXlgdF+6tWkS6bJERGpNDb+CopJSnvlsDU98\nuJL69erwwOi+/DKjE3XqaPaNiMQ2NfxysjbmM25aNsvy9jDy2Hb8+aJjOapZw0iXJSISFGr4wIHC\nYh6bs4LnPl9L6yYNePqKgYzs0z7SZYmIBFXCN/zPVm5j4vRsNuws4NJBaYw/pyfNk5Oqf6KISIxJ\n2Ia/a38h97y7lMzvc+naujGvjxnC4K6tIl2WiEjIJFzDL8uqv/vtpewuKOLm07tz8xndaVghSkFE\nJN4kVMOvKqteRCTeJUTDLyl1/POrdTw0Owfn4Pbze3PNSZ2pW2GqpS7oLSLxLO4bvr9Z9bqgt4jE\nu7ht+OWz6ps2TOLxX/bnov4dKo0vruoqV2r4IhIP4rLhf7duJ+OnZbF6235GD0hl0vm9adm4fpXP\n0QW9RSTexVXD33OwiIfKZdW/dO0gTjumjV/P1QW9RSTexU3DL59Vf93JXfjDWTXLqh87Iv2wMXzQ\nBb1FJL7EfMOvmFX/jyuP57haZNXrgt4iEu9ituGHIqteF/QWkXgWUMM3s3XAXqAEKA7GJbj8UT6r\nfrA3q76rsupFRKoUjCP8051z24Pwe6qlrHoRkdqLmSGd8ln15/Rpx58vPJa2yqoXEfFboA3fAR+Y\nmQP+4ZybWnEFMxsDjAFIS0ur8QscKCzm0Q9W8PwXZVn1xzOyT7sAyxYRSTyBNvyhzrlNZtYWmGNm\ny51z88qv4H0TmAqQkZHhavLL563wZNVv3FXA5YPTGHdOT5o1VFa9iEhtBNTwnXObvD+3mtl0YBAw\nr+pnVe+wrPo2jXnjNycyqEvLQH+tiEhCq3X
DN7PGQB3n3F7v7bOBuwMppmJW/f+e0Z2bTldWvYhI\nMARyhH8UMN0bRlYP+Ldz7v3a/rKNuw4wacZiPs3ZRv9OKbxySV96tlNWvYhIsNS64Tvn1gDHBVpA\nSanjpS/X8fAHOQDceUFvrjrxyKx6EREJTESnZS7fvIdx07L5YUM+w9LbcO+oPnRscWRWvYiIBC4i\nDf9gUQlPfrKKpz5dTbPkJJ74VX8uPK7yrHoREQlc2Bv+t2t3Mj4zizXb9jN6YCqTzqs+q15ERAIX\n1oafm1/AL/7xFR1bJPPytYM41c+sehERCVxYG/7O/YXccUoX/u+sY2hUP2ZSHURE4kJYu273Nk24\n7bze4XxJERHxqn14fC0k19cXqEREIiWsDV9ERCJHDV9EJEGo4YuIJIioniozY2GuLiouIhIkUdvw\nZyzMZUJmNgVFJYBnDv+EzGwANX0RkVqI2iGdKbNzfmr2ZQqKSpgyOydCFYmIxLaobfib8gtqtFxE\nRKoWtQ2/Q0pyjZaLiEjVorbhjx2RTnKFK10lJ9Vl7Ij0CFUkIhLbovakbdmJWc3SEREJjoAavpmN\nBJ4A6gLPOucmB6Uqr1EDUtXgRUSCpNZDOmZWF3gSOAfoDVxqZkpGExGJUoGM4Q8CVjnn1jjnCoHX\ngIuCU5aIiARbIA0/FdhQ7v5G7zIREYlCgYzh+7oArTtiJbMxwBjv3UNmtjiA1wyX1sD2SBfhB9UZ\nPLFQI6jOYIuVOoMyPTGQhr/0vWA7AAAFWUlEQVQR6FTufkdgU8WVnHNTgakAZjbfOZcRwGuGheoM\nrlioMxZqBNUZbLFUZzB+TyBDOt8BPcysi5nVB34FvBWMokREJPhqfYTvnCs2s5uB2XimZT7vnFsS\ntMpERCSoApqH75ybBcyqwVOmBvJ6YaQ6gysW6oyFGkF1BltC1WnOHXGeVURE4lDUZumIiEhwhaTh\nm9lIM8sxs1VmNt7H4w3M7HXv49+YWedQ1FFNjZ3M7BMzW2ZmS8zs9z7WGWZmu81skfffHeGu01vH\nOjPL9tZwxNl68/iLd3tmmdnAMNeXXm4bLTKzPWZ2S4V1IrItzex5M9tafjqwmbU0szlmttL7s0Ul\nz73au85KM7s6AnVOMbPl3v+n080spZLnVrl/hKHOu8wst9z/23MreW6VfSEMdb5ersZ1ZraokueG\nZXtW1oNCun8654L6D88J3NVAV6A+8APQu8I6vwWe9t7+FfB6sOvwo872wEDv7abACh91DgPeCXdt\nPmpdB7Su4vFzgffwfDdiCPBNBGutC2wGjo6GbQmcCgwEFpdb9hAw3nt7PPCgj+e1BNZ4f7bw3m4R\n5jrPBup5bz/oq05/9o8w1HkX8Cc/9osq+0Ko66zw+CPAHZHcnpX1oFDun6E4wvcncuEi4CXv7TeB\nM83M1xe5QsY5l+ec+957ey+wjNj9pvBFwMvO42sgxczaR6iWM4HVzrn1EXr9wzjn5gE7Kywuv/+9\nBIzy8dQRwBzn3E7n3C5gDjAynHU65z5wzhV7736N57suEVXJ9vRHWKNYqqrT22t+Abwaqtf3RxU9\nKGT7Zygavj+RCz+t492hdwOtQlCLX7xDSgOAb3w8fKKZ/WBm75nZsWEt7L8c8IGZLTDPN5criqaY\ni19R+R9SNGxLgKOcc3ng+aMD2vpYJ5q2KcC1eD7F+VLd/hEON3uHnp6vZAgimrbnKcAW59zKSh4P\n+/as0INCtn+GouH7E7ngVyxDOJhZE2AacItzbk+Fh7/HMzRxHPBXYEa46/Ma6pwbiCeZ9CYzO7XC\n41GxPc3zBbwLgf/4eDhatqW/omKbApjZbUAx8Eolq1S3f4TaU0A3oD+Qh2e4pKKo2Z7ApVR9dB/W\n7VlND6r0aT6WVbs9Q9Hw/Ylc+GkdM6sHNKd2HxMDYmZJeDb0K865zIqPO+f2OOf2eW/PApLMrHWY\ny8Q5t8n7cyswHc/H4/L8irkIg3OA751zWyo+EC3b0mtL2ZCX9+dWH+tExTb1now7H7jceQdvK/Jj\n/wgp59wW51yJc64UeKaS14+W7VkPGA28Xtk64dyelfSgkO2foWj4/kQuvAWUnVX+GfBxZTtzqHjH\n8Z4DljnnHq1knXZl5xbMbBCe7bUjfFWCmTU2s6Zlt/GcyKsYQPcWcJV5DAF2l30kDLNKj5yiYVuW\nU37/uxqY6WOd2cDZZtbCO0RxtndZ2JjnAkPjgAudcwcqWcef/SOkKpwvuriS14+WKJbhwHLn3EZf\nD4Zze1bRg0K3f4bo7PO5eM44rwZu8y67G8+OC9AQz8f+VcC3QNdQng2vpMaT8XwEygIWef+dC9wI\n3Ohd52ZgCZ4ZBV8DJ0Wgzq7e1//BW0vZ9ixfp+G5GM1qIBvIiECdjfA08ObllkV8W+J5A8oDivAc\nFV2H53zRR8BK78+W3nUz8Fy5rey513r30VXAryNQ5yo847Rl+2fZzLYOwKyq9o8w1/lP736XhadZ\nta9Yp/f+EX0hnHV6l79Ytk+WWzci27OKHhSy/VPftBURSRD6pq2ISIJQwxcRSRBq+CIiCUINX0Qk\nQajhi4gkCDV8EZEEoYYvIpIg1PBFRBLE/wOF691fEa+RdwAAAABJRU5ErkJggg==\n",
  2947. "text/plain": [
  2948. "<Figure size 432x288 with 1 Axes>"
  2949. ]
  2950. },
  2951. "metadata": {},
  2952. "output_type": "display_data"
  2953. },
  2954. {
  2955. "name": "stdout",
  2956. "output_type": "stream",
  2957. "text": [
  2958. "1.9918575286865234 2.954965829849243\n"
  2959. ]
  2960. }
  2961. ],
  2962. "source": [
2963. "# randomly initialize the parameters\n",
  2964. "w = t.rand(1, 1) \n",
  2965. "b = t.zeros(1, 1)\n",
  2966. "\n",
2967. "lr = 0.001 # learning rate\n",
  2968. "\n",
  2969. "for ii in range(20000):\n",
  2970. " x, y = get_fake_data()\n",
  2971. " \n",
2972. " # forward: compute the loss\n",
2973. " y_pred = x.mm(w) + b.expand_as(y) # x @ w is equivalent to x.mm(w); Python 3 only\n",
2974. " loss = 0.5 * (y_pred - y) ** 2 # squared error\n",
  2975. " loss = loss.sum()\n",
  2976. " \n",
2977. " # backward: compute the gradients by hand\n",
  2978. " dloss = 1\n",
  2979. " dy_pred = dloss * (y_pred - y)\n",
  2980. " \n",
  2981. " dw = x.t().mm(dy_pred)\n",
  2982. " db = dy_pred.sum()\n",
  2983. " \n",
2984. " # update the parameters\n",
  2985. " w.sub_(lr * dw)\n",
  2986. " b.sub_(lr * db)\n",
  2987. " \n",
  2988. " if ii%1000 ==0:\n",
  2989. " \n",
2990. " # plot\n",
  2991. " display.clear_output(wait=True)\n",
  2992. " x = t.arange(0, 20).view(-1, 1)\n",
  2993. " y = x.mm(w) + b.expand_as(x)\n",
  2994. " plt.plot(x.numpy(), y.numpy()) # predicted\n",
  2995. " \n",
  2996. " x2, y2 = get_fake_data(batch_size=20) \n",
  2997. " plt.scatter(x2.numpy(), y2.numpy()) # true data\n",
  2998. " \n",
  2999. " plt.xlim(0, 20)\n",
  3000. " plt.ylim(0, 41)\n",
  3001. " plt.show()\n",
  3002. " plt.pause(0.5)\n",
  3003. " \n",
  3004. "print(w.squeeze()[0], b.squeeze()[0])"
  3005. ]
  3006. },
  3007. {
  3008. "cell_type": "markdown",
  3009. "metadata": {},
  3010. "source": [
3011. "As we can see, the program has essentially learned w=2 and b=3, and the fitted line in the plot matches the data well."
  3012. ]
  3013. },
  3014. {
  3015. "cell_type": "markdown",
  3016. "metadata": {},
  3017. "source": [
3018. "Although many operations were covered above, mastering this example is essentially enough. As for the rest, the reader can come back to this part or look up the corresponding documentation whenever the need arises.\n"
  3019. ]
  3020. }
  3021. ],
  3022. "metadata": {
  3023. "kernelspec": {
  3024. "display_name": "Python 3",
  3025. "language": "python",
  3026. "name": "python3"
  3027. },
  3028. "language_info": {
  3029. "codemirror_mode": {
  3030. "name": "ipython",
  3031. "version": 3
  3032. },
  3033. "file_extension": ".py",
  3034. "mimetype": "text/x-python",
  3035. "name": "python",
  3036. "nbconvert_exporter": "python",
  3037. "pygments_lexer": "ipython3",
  3038. "version": "3.5.2"
  3039. }
  3040. },
  3041. "nbformat": 4,
  3042. "nbformat_minor": 2
  3043. }
