stb_image.h

/* stb_image - v2.27 - public domain image loader - http://nothings.org/stb
   no warranty implied; use at your own risk

   Do this:
      #define STB_IMAGE_IMPLEMENTATION
   before you include this file in *one* C or C++ file to create the implementation.

   // i.e. it should look like this:
   #include ...
   #include ...
   #include ...
   #define STB_IMAGE_IMPLEMENTATION
   #include "stb_image.h"

   You can #define STBI_ASSERT(x) before the #include to avoid using assert.h.
   And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using
   malloc, realloc, free.
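
   For example, to route all of stb_image's allocations through custom hooks
   (a sketch: my_malloc, my_realloc and my_free are hypothetical functions you
   provide, and the exact macro argument lists shown here are an assumption --
   check the implementation section before relying on them):

      #define STBI_MALLOC(sz)        my_malloc(sz)
      #define STBI_REALLOC(p,newsz)  my_realloc(p,newsz)
      #define STBI_FREE(p)           my_free(p)
      #define STB_IMAGE_IMPLEMENTATION
      #include "stb_image.h"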

   QUICK NOTES:
      Primarily of interest to game developers and other people who can
         avoid problematic images and only need the trivial interface

      JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib)
      PNG 1/2/4/8/16-bit-per-channel

      TGA (not sure what subset, if a subset)
      BMP non-1bpp, non-RLE
      PSD (composited view only, no extra channels, 8/16 bit-per-channel)

      GIF (*comp always reports as 4-channel)
      HDR (radiance rgbE format)
      PIC (Softimage PIC)
      PNM (PPM and PGM binary only)

      Animated GIF still needs a proper API, but here's one way to do it:
         http://gist.github.com/urraka/685d9a6340b26b830d49

      - decode from memory or through FILE (define STBI_NO_STDIO to remove code)
      - decode from arbitrary I/O callbacks
      - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON)

   Full documentation under "DOCUMENTATION" below.

   LICENSE
      See end of file for license information.

   RECENT REVISION HISTORY:

      2.27  (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes
      2.26  (2020-07-13) many minor fixes
      2.25  (2020-02-02) fix warnings
      2.24  (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically
      2.23  (2019-08-11) fix clang static analysis warning
      2.22  (2019-03-04) gif fixes, fix warnings
      2.21  (2019-02-25) fix typo in comment
      2.20  (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs
      2.19  (2018-02-11) fix warning
      2.18  (2018-01-30) fix warnings
      2.17  (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings
      2.16  (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes
      2.15  (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC
      2.14  (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
      2.13  (2016-12-04) experimental 16-bit API, only for PNG so far; fixes
      2.12  (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
      2.11  (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 RGB-format JPEG;
                         remove white matting in PSD; allocate large structures on
                         the stack; correct channel count for PNG & BMP
      2.10  (2016-01-22) avoid warning introduced in 2.09
      2.09  (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED

   See end of file for full revision history.

 ============================    Contributors    =========================

 Image formats                        Extensions, features
    Sean Barrett (jpeg, png, bmp)        Jetro Lauha (stbi_info)
    Nicolas Schulz (hdr, psd)            Martin "SpartanJ" Golini (stbi_info)
    Jonathan Dummer (tga)                James "moose2000" Brown (iPhone PNG)
    Jean-Marc Lienher (gif)              Ben "Disch" Wenger (io callbacks)
    Tom Seddon (pic)                     Omar Cornut (1/2/4-bit PNG)
    Thatcher Ulrich (psd)                Nicolas Guillemot (vertical flip)
    Ken Miller (pgm, ppm)                Richard Mitton (16-bit PSD)
    github:urraka (animated gif)         Junggon Kim (PNM comments)
    Christopher Forseth (animated gif)   Daniel Gibson (16-bit TGA)
                                         socks-the-fox (16-bit PNG)
                                         Jeremy Sawicki (handle all ImageNet JPGs)
 Optimizations & bugfixes                Mikhail Morozov (1-bit BMP)
    Fabian "ryg" Giesen                  Anael Seghezzi (is-16-bit query)
    Arseny Kapoulkine                    Simon Breuss (16-bit PNM)
    John-Mark Allen
    Carmelo J Fdez-Aguera

 Bug & warning fixes
    Marc LeBlanc           David Woo          Guillaume George     Martins Mozeiko
    Christpher Lloyd       Jerry Jansson      Joseph Thomson       Blazej Dariusz Roszkowski
    Phil Jordan            Dave Moore         Roy Eltham
    Hayaki Saito           Nathan Reed        Won Chun
    Luke Graham            Johan Duparc       Nick Verigakis       the Horde3D community
    Thomas Ruf             Ronny Chevalier    github:rlyeh
    Janez Zemva            John Bartholomew   Michal Cichon        github:romigrou
    Jonathan Blow          Ken Hamada         Tero Hanninen        github:svdijk
    Eugene Golushkov       Laurent Gomila     Cort Stratton        github:snagar
    Aruelien Pocheville    Sergio Gonzalez    Thibault Reuille     github:Zelex
    Cass Everitt           Ryamond Barbiero                        github:grim210
    Paul Du Bois           Engin Manap        Aldo Culquicondor    github:sammyhw
    Philipp Wiesemann      Dale Weiler        Oriol Ferrer Mesia   github:phprus
    Josh Tobin             Matthew Gregan                          github:poppolopoppo
    Julian Raschke         Gregory Mullen     Christian Floisand   github:darealshinji
    Baldur Karlsson        Kevin Schmidt      JR Smith             github:Michaelangel007
    Brad Weinberger        Matvey Cherevko    github:mosra         Luca Sas
    Alexander Veselov      Zack Middleton     [reserved]           Ryan C. Gordon
    [reserved]             [reserved]

                  DO NOT ADD YOUR NAME HERE
                  Jacko Dirks

 To add your name to the credits, pick a random blank space in the middle and fill it.
 80% of merge conflicts on stb PRs are due to people adding their name at the end
 of the credits.
*/

#ifndef STBI_INCLUDE_STB_IMAGE_H
#define STBI_INCLUDE_STB_IMAGE_H

// DOCUMENTATION
//
// Limitations:
//    - no 12-bit-per-channel JPEG
//    - no JPEGs with arithmetic coding
//    - GIF always returns *comp=4
//
// Basic usage (see HDR discussion below for HDR usage):
//    int x,y,n;
//    unsigned char *data = stbi_load(filename, &x, &y, &n, 0);
//    // ... process data if not NULL ...
//    // ... x = width, y = height, n = # 8-bit components per pixel ...
//    // ... replace '0' with '1'..'4' to force that many components per pixel
//    // ... but 'n' will always be the number that it would have been if you said 0
//    stbi_image_free(data)
//
// Standard parameters:
//    int *x                 -- outputs image width in pixels
//    int *y                 -- outputs image height in pixels
//    int *channels_in_file  -- outputs # of image components in image file
//    int desired_channels   -- if non-zero, # of image components requested in result
//
// The return value from an image loader is an 'unsigned char *' which points
// to the pixel data, or NULL on an allocation failure or if the image is
// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels,
// with each pixel consisting of N interleaved 8-bit components; the first
// pixel pointed to is top-left-most in the image. There is no padding between
// image scanlines or between pixels, regardless of format. The number of
// components N is 'desired_channels' if desired_channels is non-zero, or
// *channels_in_file otherwise. If desired_channels is non-zero,
// *channels_in_file has the number of components that _would_ have been
// output otherwise. E.g. if you set desired_channels to 4, you will always
// get RGBA output, but you can check *channels_in_file to see if it's trivially
// opaque because e.g. there were only 3 channels in the source image.
//
// An output image with N components has the following components interleaved
// in this order in each pixel:
//
//     N=#comp     components
//       1           grey
//       2           grey, alpha
//       3           red, green, blue
//       4           red, green, blue, alpha
//
// If image loading fails for any reason, the return value will be NULL,
// and *x, *y, *channels_in_file will be unchanged. The function
// stbi_failure_reason() can be queried for an extremely brief, end-user
// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS
// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly
// more user-friendly ones.
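//
// For example (a sketch; "image.png" is a placeholder filename):
//    unsigned char *data = stbi_load("image.png", &x, &y, &n, 0);
//    if (data == NULL)
//       printf("load failed: %s\n", stbi_failure_reason());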
//
// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized.
//
// To query the width, height and component count of an image without having to
// decode the full file, you can use the stbi_info family of functions:
//
//    int x,y,n,ok;
//    ok = stbi_info(filename, &x, &y, &n);
//    // returns ok=1 and sets x, y, n if image is a supported format,
//    // 0 otherwise.
//
// Note that stb_image pervasively uses ints in its public API for sizes,
// including sizes of memory buffers. This is now part of the API and thus
// hard to change without causing breakage. As a result, the various image
// loaders all have certain limits on image size; these differ somewhat
// by format but generally boil down to either just under 2GB or just under
// 1GB. When the decoded image would be larger than this, stb_image decoding
// will fail.
//
// Additionally, stb_image will reject image files that have any of their
// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS,
// which defaults to 2**24 = 16777216 pixels. Due to the above memory limit,
// the only way to have an image with such dimensions load correctly
// is for it to have a rather extreme aspect ratio. Either way, the
// assumption here is that such larger images are likely to be malformed
// or malicious. If you do need to load an image with individual dimensions
// larger than that, and it still fits in the overall size limit, you can
// #define STBI_MAX_DIMENSIONS on your own to be something larger.
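//
// For example, to raise the per-dimension cap (a sketch; the value shown is
// arbitrary, and the define must be visible when the implementation is compiled):
//    #define STBI_MAX_DIMENSIONS (1<<26)
//    #define STB_IMAGE_IMPLEMENTATION
//    #include "stb_image.h"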
//
// ===========================================================================
//
// UNICODE:
//
// If compiling for Windows and you wish to use Unicode filenames, compile
// with
//     #define STBI_WINDOWS_UTF8
// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert
// Windows wchar_t filenames to utf8.
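//
// For example (a sketch; wpath stands in for a wchar_t* filename you already have):
//    char utf8_path[1024];
//    stbi_convert_wchar_to_utf8(utf8_path, sizeof(utf8_path), wpath);
//    unsigned char *data = stbi_load(utf8_path, &x, &y, &n, 0);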
//
// ===========================================================================
//
// Philosophy
//
// stb libraries are designed with the following priorities:
//
//    1. easy to use
//    2. easy to maintain
//    3. good performance
//
// Sometimes I let "good performance" creep up in priority over "easy to maintain",
// and for best performance I may provide less-easy-to-use APIs that give higher
// performance, in addition to the easy-to-use ones. Nevertheless, it's important
// to keep in mind that from the standpoint of you, a client of this library,
// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all.
//
// Some secondary priorities arise directly from the first two, some of which
// provide more explicit reasons why performance can't be emphasized.
//
//    - Portable ("ease of use")
//    - Small source code footprint ("easy to maintain")
//    - No dependencies ("ease of use")
//
// ===========================================================================
//
// I/O callbacks
//
// I/O callbacks allow you to read from arbitrary sources, like packaged
// files or some other source. Data read from callbacks are processed
// through a small internal buffer (currently 128 bytes) to try to reduce
// overhead.
//
// The three functions you must define are "read" (reads some bytes of data),
// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end).
//
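// A minimal sketch of callbacks that read from a FILE* (stdio assumed available,
// error handling omitted; my_read/my_skip/my_eof are hypothetical names):
//
//    static int  my_read(void *user, char *data, int size) { return (int) fread(data, 1, size, (FILE *) user); }
//    static void my_skip(void *user, int n)                { fseek((FILE *) user, n, SEEK_CUR); }
//    static int  my_eof (void *user)                       { return feof((FILE *) user); }
//
//    stbi_io_callbacks cb = { my_read, my_skip, my_eof };
//    unsigned char *data = stbi_load_from_callbacks(&cb, (void *) fp, &x, &y, &n, 0);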
// ===========================================================================
//
// SIMD support
//
// The JPEG decoder will try to automatically use SIMD kernels on x86 when
// supported by the compiler. For ARM Neon support, you must explicitly
// request it.
//
// (The old do-it-yourself SIMD API is no longer supported in the current
// code.)
//
// On x86, SSE2 will automatically be used when available based on a run-time
// test; if not, the generic C versions are used as a fall-back. On ARM targets,
// the typical path is to have separate builds for NEON and non-NEON devices
// (at least this is true for iOS and Android). Therefore, the NEON support is
// toggled by a build flag: define STBI_NEON to get NEON loops.
//
// If for some reason you do not want to use any of SIMD code, or if
// you have issues compiling it, you can disable it entirely by
// defining STBI_NO_SIMD.
//
// ===========================================================================
//
// HDR image support (disable by defining STBI_NO_HDR)
//
// stb_image supports loading HDR images in general, and currently the Radiance
// .HDR file format specifically. You can still load any file through the existing
// interface; if you attempt to load an HDR file, it will be automatically remapped
// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1;
// both of these constants can be reconfigured through this interface:
//
//     stbi_hdr_to_ldr_gamma(2.2f);
//     stbi_hdr_to_ldr_scale(1.0f);
//
// (note, do not use _inverse_ constants; stbi_image will invert them
// appropriately).
//
// Additionally, there is a new, parallel interface for loading files as
// (linear) floats to preserve the full dynamic range:
//
//    float *data = stbi_loadf(filename, &x, &y, &n, 0);
//
// If you load LDR images through this interface, those images will
// be promoted to floating point values, run through the inverse of
// constants corresponding to the above:
//
//     stbi_ldr_to_hdr_scale(1.0f);
//     stbi_ldr_to_hdr_gamma(2.2f);
//
// Finally, given a filename (or an open file or memory block--see header
// file for details) containing image data, you can query for the "most
// appropriate" interface to use (that is, whether the image is HDR or
// not), using:
//
//     stbi_is_hdr(char *filename);
//
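// Putting it together (a sketch; "scene.hdr" is a placeholder filename):
//    if (stbi_is_hdr("scene.hdr")) {
//       float *rgb = stbi_loadf("scene.hdr", &x, &y, &n, 0);
//       // ... use linear float data ...
//       stbi_image_free(rgb);
//    }
//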
// ===========================================================================
//
// iPhone PNG support:
//
// We optionally support converting iPhone-formatted PNGs (which store
// premultiplied BGRA) back to RGB, even though they're internally encoded
// differently. To enable this conversion, call
// stbi_convert_iphone_png_to_rgb(1).
//
// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per
// pixel to remove any premultiplied alpha *only* if the image file explicitly
// says there's premultiplied data (currently only happens in iPhone images,
// and only if iPhone convert-to-rgb processing is on).
//
// ===========================================================================
//
// ADDITIONAL CONFIGURATION
//
//  - You can suppress implementation of any of the decoders to reduce
//    your code footprint by #defining one or more of the following
//    symbols before creating the implementation.
//
//        STBI_NO_JPEG
//        STBI_NO_PNG
//        STBI_NO_BMP
//        STBI_NO_PSD
//        STBI_NO_TGA
//        STBI_NO_GIF
//        STBI_NO_HDR
//        STBI_NO_PIC
//        STBI_NO_PNM   (.ppm and .pgm)
//
//  - You can request *only* certain decoders and suppress all other ones
//    (this will be more forward-compatible, as addition of new decoders
//    doesn't require you to disable them explicitly):
//
//        STBI_ONLY_JPEG
//        STBI_ONLY_PNG
//        STBI_ONLY_BMP
//        STBI_ONLY_PSD
//        STBI_ONLY_TGA
//        STBI_ONLY_GIF
//        STBI_ONLY_HDR
//        STBI_ONLY_PIC
//        STBI_ONLY_PNM   (.ppm and .pgm)
//
//  - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
//    want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
//
//  - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater
//    than that size (in either width or height) without further processing.
//    This is to let programs in the wild set an upper bound to prevent
//    denial-of-service attacks on untrusted data, as one could generate a
//    valid image of gigantic dimensions and force stb_image to allocate a
//    huge block of memory and spend disproportionate time decoding it. By
//    default this is set to (1 << 24), which is 16777216, but that's still
//    very big.
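//
// For example, to compile in only the PNG and JPEG decoders (a sketch):
//    #define STBI_ONLY_PNG
//    #define STBI_ONLY_JPEG
//    #define STB_IMAGE_IMPLEMENTATION
//    #include "stb_image.h"
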
#ifndef STBI_NO_STDIO
#include <stdio.h>
#endif // STBI_NO_STDIO

#define STBI_VERSION 1

enum {
   STBI_default = 0, // only used for desired_channels
   STBI_grey = 1,
   STBI_grey_alpha = 2,
   STBI_rgb = 3,
   STBI_rgb_alpha = 4
};

#include <stdlib.h>
typedef unsigned char stbi_uc;
typedef unsigned short stbi_us;

#ifdef __cplusplus
extern "C" {
#endif

#ifndef STBIDEF
#ifdef STB_IMAGE_STATIC
#define STBIDEF static
#else
#define STBIDEF extern
#endif
#endif

//////////////////////////////////////////////////////////////////////////////
//
// PRIMARY API - works on images of any type
//

//
// load image by filename, open file, or memory buffer
//

typedef struct {
   int  (*read)(void* user, char* data, int size); // fill 'data' with 'size' bytes.  return number of bytes actually read
   void (*skip)(void* user, int n);                // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
   int  (*eof)(void* user);                        // returns nonzero if we are at end of file/data
} stbi_io_callbacks;

////////////////////////////////////
//
// 8-bits-per-channel interface
//

STBIDEF stbi_uc* stbi_load_from_memory(
   stbi_uc const* buffer, int len, int* x, int* y, int* channels_in_file,
   int desired_channels);
STBIDEF stbi_uc* stbi_load_from_callbacks(
   stbi_io_callbacks const* clbk, void* user, int* x, int* y,
   int* channels_in_file, int desired_channels);

#ifndef STBI_NO_STDIO
STBIDEF stbi_uc* stbi_load(
   char const* filename, int* x, int* y, int* channels_in_file,
   int desired_channels);
STBIDEF stbi_uc* stbi_load_from_file(
   FILE* f, int* x, int* y, int* channels_in_file, int desired_channels);
// for stbi_load_from_file, file pointer is left pointing immediately after image
#endif

#ifndef STBI_NO_GIF
STBIDEF stbi_uc* stbi_load_gif_from_memory(
   stbi_uc const* buffer, int len, int** delays, int* x, int* y, int* z, int* comp,
   int req_comp);
#endif
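
// A sketch of decoding an animated GIF already held in memory (buf/len are
// assumed to contain the whole file; frames come back concatenated, z frames
// of x*y pixels each, with per-frame delays written to 'delays'):
//    int x, y, z, comp, *delays;
//    stbi_uc *frames = stbi_load_gif_from_memory(buf, len, &delays, &x, &y, &z, &comp, 4);
//    // frame i starts at frames + (size_t) i * x * y * 4 when req_comp == 4
//    stbi_image_free(frames);
//    free(delays); // assuming the default allocator has not been overridden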

#ifdef STBI_WINDOWS_UTF8
STBIDEF int stbi_convert_wchar_to_utf8(
   char* buffer, size_t bufferlen, const wchar_t* input);
#endif

////////////////////////////////////
//
// 16-bits-per-channel interface
//

STBIDEF stbi_us* stbi_load_16_from_memory(
   stbi_uc const* buffer, int len, int* x, int* y, int* channels_in_file,
   int desired_channels);
STBIDEF stbi_us* stbi_load_16_from_callbacks(
   stbi_io_callbacks const* clbk, void* user, int* x, int* y,
   int* channels_in_file, int desired_channels);

#ifndef STBI_NO_STDIO
STBIDEF stbi_us* stbi_load_16(
   char const* filename, int* x, int* y, int* channels_in_file,
   int desired_channels);
STBIDEF stbi_us* stbi_load_from_file_16(
   FILE* f, int* x, int* y, int* channels_in_file, int desired_channels);
#endif

////////////////////////////////////
//
// float-per-channel interface
//

#ifndef STBI_NO_LINEAR
STBIDEF float* stbi_loadf_from_memory(
   stbi_uc const* buffer, int len, int* x, int* y, int* channels_in_file,
   int desired_channels);
STBIDEF float* stbi_loadf_from_callbacks(
   stbi_io_callbacks const* clbk, void* user, int* x, int* y,
   int* channels_in_file, int desired_channels);

#ifndef STBI_NO_STDIO
STBIDEF float* stbi_loadf(
   char const* filename, int* x, int* y, int* channels_in_file,
   int desired_channels);
STBIDEF float* stbi_loadf_from_file(
   FILE* f, int* x, int* y, int* channels_in_file, int desired_channels);
#endif
#endif

#ifndef STBI_NO_HDR
STBIDEF void stbi_hdr_to_ldr_gamma(float gamma);
STBIDEF void stbi_hdr_to_ldr_scale(float scale);
#endif // STBI_NO_HDR

#ifndef STBI_NO_LINEAR
STBIDEF void stbi_ldr_to_hdr_gamma(float gamma);
STBIDEF void stbi_ldr_to_hdr_scale(float scale);
#endif // STBI_NO_LINEAR

// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR
STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const* clbk, void* user);
STBIDEF int stbi_is_hdr_from_memory(stbi_uc const* buffer, int len);
#ifndef STBI_NO_STDIO
STBIDEF int stbi_is_hdr(char const* filename);
STBIDEF int stbi_is_hdr_from_file(FILE* f);
#endif // STBI_NO_STDIO

// get a VERY brief reason for failure
// on most compilers (and ALL modern mainstream compilers) this is threadsafe
STBIDEF const char* stbi_failure_reason(void);

// free the loaded image -- this is just free()
STBIDEF void stbi_image_free(void* retval_from_stbi_load);

// get image dimensions & components without fully decoding
STBIDEF int stbi_info_from_memory(
   stbi_uc const* buffer, int len, int* x, int* y, int* comp);
STBIDEF int stbi_info_from_callbacks(
   stbi_io_callbacks const* clbk, void* user, int* x, int* y, int* comp);
STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const* buffer, int len);
STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const* clbk, void* user);

#ifndef STBI_NO_STDIO
STBIDEF int stbi_info(char const* filename, int* x, int* y, int* comp);
STBIDEF int stbi_info_from_file(FILE* f, int* x, int* y, int* comp);
STBIDEF int stbi_is_16_bit(char const* filename);
STBIDEF int stbi_is_16_bit_from_file(FILE* f);
#endif

// for image formats that explicitly notate that they have premultiplied alpha,
// we just return the colors as stored in the file. set this flag to force
// unpremultiplication. results are undefined if the unpremultiply overflows.
  476. STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply);
  477. // indicate whether we should process iphone images back to canonical format,
  478. // or just pass them through "as-is"
  479. STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert);
  480. // flip the image vertically, so the first pixel in the output array is the bottom left
  481. STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip);
  482. // as above, but only applies to images loaded on the thread that calls the function
  483. // this function is only available if your compiler supports thread-local variables;
  484. // calling it will fail to link if your compiler doesn't
  485. STBIDEF void stbi_set_unpremultiply_on_load_thread(
  486. int flag_true_if_should_unpremultiply);
  487. STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert);
  488. STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip);
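//
// Illustrative sketch: an OpenGL-style loader typically sets the global flip
// flag once at startup so row 0 of the returned pixels is the bottom of the
// image; the _thread variant scopes the same setting to the calling thread:
//
//    stbi_set_flip_vertically_on_load(1);            // all threads
//    // stbi_set_flip_vertically_on_load_thread(1);  // this thread only
//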
  489. // ZLIB client - used by PNG, available for other purposes
  490. STBIDEF char* stbi_zlib_decode_malloc_guesssize(
  491. const char* buffer, int len, int initial_size, int* outlen);
  492. STBIDEF char* stbi_zlib_decode_malloc_guesssize_headerflag(
  493. const char* buffer, int len, int initial_size, int* outlen, int parse_header);
  494. STBIDEF char* stbi_zlib_decode_malloc(const char* buffer, int len, int* outlen);
  495. STBIDEF int stbi_zlib_decode_buffer(
  496. char* obuffer, int olen, const char* ibuffer, int ilen);
  497. STBIDEF char* stbi_zlib_decode_noheader_malloc(
  498. const char* buffer, int len, int* outlen);
  499. STBIDEF int stbi_zlib_decode_noheader_buffer(
  500. char* obuffer, int olen, const char* ibuffer, int ilen);
  501. #ifdef __cplusplus
  502. }
  503. #endif
  504. //
  505. //
  506. //// end header file /////////////////////////////////////////////////////
  507. #endif // STBI_INCLUDE_STB_IMAGE_H
  508. #ifdef STB_IMAGE_IMPLEMENTATION
  509. #if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) || \
  510. defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) || \
  511. defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) || \
  512. defined(STBI_ONLY_ZLIB)
  513. #ifndef STBI_ONLY_JPEG
  514. #define STBI_NO_JPEG
  515. #endif
  516. #ifndef STBI_ONLY_PNG
  517. #define STBI_NO_PNG
  518. #endif
  519. #ifndef STBI_ONLY_BMP
  520. #define STBI_NO_BMP
  521. #endif
  522. #ifndef STBI_ONLY_PSD
  523. #define STBI_NO_PSD
  524. #endif
  525. #ifndef STBI_ONLY_TGA
  526. #define STBI_NO_TGA
  527. #endif
  528. #ifndef STBI_ONLY_GIF
  529. #define STBI_NO_GIF
  530. #endif
  531. #ifndef STBI_ONLY_HDR
  532. #define STBI_NO_HDR
  533. #endif
  534. #ifndef STBI_ONLY_PIC
  535. #define STBI_NO_PIC
  536. #endif
  537. #ifndef STBI_ONLY_PNM
  538. #define STBI_NO_PNM
  539. #endif
  540. #endif
  541. #if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
  542. #define STBI_NO_ZLIB
  543. #endif
  544. #include <limits.h>
  545. #include <stdarg.h>
  546. #include <stddef.h> // ptrdiff_t on osx
  547. #include <stdlib.h>
  548. #include <string.h>
  549. #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
  550. #include <math.h> // ldexp, pow
  551. #endif
  552. #ifndef STBI_NO_STDIO
  553. #include <stdio.h>
  554. #endif
  555. #ifndef STBI_ASSERT
  556. #include <assert.h>
  557. #define STBI_ASSERT(x) assert(x)
  558. #endif
  559. #ifdef __cplusplus
  560. #define STBI_EXTERN extern "C"
  561. #else
  562. #define STBI_EXTERN extern
  563. #endif
  564. #ifndef _MSC_VER
  565. #ifdef __cplusplus
  566. #define stbi_inline inline
  567. #else
  568. #define stbi_inline
  569. #endif
  570. #else
  571. #define stbi_inline __forceinline
  572. #endif
  573. #ifndef STBI_NO_THREAD_LOCALS
  574. #if defined(__cplusplus) && __cplusplus >= 201103L
  575. #define STBI_THREAD_LOCAL thread_local
  576. #elif defined(__GNUC__) && __GNUC__ < 5
  577. #define STBI_THREAD_LOCAL __thread
  578. #elif defined(_MSC_VER)
  579. #define STBI_THREAD_LOCAL __declspec(thread)
  580. #elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && \
  581. !defined(__STDC_NO_THREADS__)
  582. #define STBI_THREAD_LOCAL _Thread_local
  583. #endif
  584. #ifndef STBI_THREAD_LOCAL
  585. #if defined(__GNUC__)
  586. #define STBI_THREAD_LOCAL __thread
  587. #endif
  588. #endif
  589. #endif
  590. #ifdef _MSC_VER
  591. typedef unsigned short stbi__uint16;
  592. typedef signed short stbi__int16;
  593. typedef unsigned int stbi__uint32;
  594. typedef signed int stbi__int32;
  595. #else
  596. #include <stdint.h>
  597. typedef uint16_t stbi__uint16;
  598. typedef int16_t stbi__int16;
  599. typedef uint32_t stbi__uint32;
  600. typedef int32_t stbi__int32;
  601. #endif
  602. // should produce compiler error if size is wrong
  603. typedef unsigned char validate_uint32[sizeof(stbi__uint32) == 4 ? 1 : -1];
  604. #ifdef _MSC_VER
  605. #define STBI_NOTUSED(v) (void)(v)
  606. #else
  607. #define STBI_NOTUSED(v) (void)sizeof(v)
  608. #endif
  609. #ifdef _MSC_VER
  610. #define STBI_HAS_LROTL
  611. #endif
  612. #ifdef STBI_HAS_LROTL
  613. #define stbi_lrot(x, y) _lrotl(x, y)
  614. #else
  615. #define stbi_lrot(x, y) (((x) << (y)) | ((x) >> (-(y)&31)))
  616. #endif
  617. #if defined(STBI_MALLOC) && defined(STBI_FREE) && \
  618. (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
  619. // ok
  620. #elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && \
  621. !defined(STBI_REALLOC_SIZED)
  622. // ok
  623. #else
  624. #error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
  625. #endif
  626. #ifndef STBI_MALLOC
  627. #define STBI_MALLOC(sz) malloc(sz)
  628. #define STBI_REALLOC(p, newsz) realloc(p, newsz)
  629. #define STBI_FREE(p) free(p)
  630. #endif
  631. #ifndef STBI_REALLOC_SIZED
  632. #define STBI_REALLOC_SIZED(p, oldsz, newsz) STBI_REALLOC(p, newsz)
  633. #endif
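// Illustrative sketch: to route all allocations through a custom allocator,
// define all three macros before the implementation include (my_alloc,
// my_realloc and my_free are placeholder names):
//
//    #define STBI_MALLOC(sz)        my_alloc(sz)
//    #define STBI_REALLOC(p,newsz)  my_realloc(p,newsz)
//    #define STBI_FREE(p)           my_free(p)
//    #define STB_IMAGE_IMPLEMENTATION
//    #include "stb_image.h"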
  634. // x86/x64 detection
  635. #if defined(__x86_64__) || defined(_M_X64)
  636. #define STBI__X64_TARGET
  637. #elif defined(__i386) || defined(_M_IX86)
  638. #define STBI__X86_TARGET
  639. #endif
  640. #if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && \
  641. !defined(STBI_NO_SIMD)
  642. // gcc doesn't support sse2 intrinsics unless you compile with -msse2,
  643. // which in turn means it gets to use SSE2 everywhere. This is unfortunate,
  644. // but previous attempts to provide the SSE2 functions with runtime
  645. // detection caused numerous issues. The way architecture extensions are
  646. // exposed in GCC/Clang is, sadly, not really suited for one-file libs.
  647. // New behavior: if compiled with -msse2, we use SSE2 without any
  648. // detection; if not, we don't use it at all.
  649. #define STBI_NO_SIMD
  650. #endif
  651. #if defined(__MINGW32__) && defined(STBI__X86_TARGET) && \
  652. !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
  653. // Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid
  654. // STBI__X64_TARGET
  655. //
  656. // 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
  657. // Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
  658. // As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
  659. // simultaneously enabling "-mstackrealign".
  660. //
  661. // See https://github.com/nothings/stb/issues/81 for more information.
  662. //
  663. // So default to no SSE2 on 32-bit MinGW. If you've read this far and added
  664. // -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
  665. #define STBI_NO_SIMD
  666. #endif
  667. #if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
  668. #define STBI_SSE2
  669. #include <emmintrin.h>
  670. #ifdef _MSC_VER
  671. #if _MSC_VER >= 1400 // not VC6
  672. #include <intrin.h> // __cpuid
  673. static int stbi__cpuid3(void) {
  674. int info[4];
  675. __cpuid(info, 1);
  676. return info[3];
  677. }
  678. #else
  679. static int stbi__cpuid3(void) {
  680. int res;
  681. __asm {
  682. mov eax,1
  683. cpuid
  684. mov res,edx
  685. }
  686. return res;
  687. }
  688. #endif
  689. #define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
  690. #if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
  691. static int stbi__sse2_available(void) {
  692. int info3 = stbi__cpuid3();
  693. return ((info3 >> 26) & 1) != 0;
  694. }
  695. #endif
  696. #else // assume GCC-style if not VC++
  697. #define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
  698. #if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
  699. static int stbi__sse2_available(void) {
  700. // If we're even attempting to compile this on GCC/Clang, that means
  701. // -msse2 is on, which means the compiler is allowed to use SSE2
  702. // instructions at will, and so are we.
  703. return 1;
  704. }
  705. #endif
  706. #endif
  707. #endif
  708. // ARM NEON
  709. #if defined(STBI_NO_SIMD) && defined(STBI_NEON)
  710. #undef STBI_NEON
  711. #endif
  712. #ifdef STBI_NEON
  713. #include <arm_neon.h>
  714. #ifdef _MSC_VER
  715. #define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
  716. #else
  717. #define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
  718. #endif
  719. #endif
  720. #ifndef STBI_SIMD_ALIGN
  721. #define STBI_SIMD_ALIGN(type, name) type name
  722. #endif
  723. #ifndef STBI_MAX_DIMENSIONS
  724. #define STBI_MAX_DIMENSIONS (1 << 24)
  725. #endif
  726. ///////////////////////////////////////////////
  727. //
  728. // stbi__context struct and start_xxx functions
  729. // stbi__context structure is our basic context used by all images, so it
  730. // contains all the IO context, plus some basic image information
  731. typedef struct {
  732. stbi__uint32 img_x, img_y;
  733. int img_n, img_out_n;
  734. stbi_io_callbacks io;
  735. void* io_user_data;
  736. int read_from_callbacks;
  737. int buflen;
  738. stbi_uc buffer_start[128];
  739. int callback_already_read;
  740. stbi_uc *img_buffer, *img_buffer_end;
  741. stbi_uc *img_buffer_original, *img_buffer_original_end;
  742. } stbi__context;
  743. static void stbi__refill_buffer(stbi__context* s);
  744. // initialize a memory-decode context
  745. static void stbi__start_mem(stbi__context* s, stbi_uc const* buffer, int len) {
  746. s->io.read = NULL;
  747. s->read_from_callbacks = 0;
  748. s->callback_already_read = 0;
  749. s->img_buffer = s->img_buffer_original = (stbi_uc*)buffer;
  750. s->img_buffer_end = s->img_buffer_original_end = (stbi_uc*)buffer + len;
  751. }
  752. // initialize a callback-based context
  753. static void stbi__start_callbacks(stbi__context* s, stbi_io_callbacks* c, void* user) {
  754. s->io = *c;
  755. s->io_user_data = user;
  756. s->buflen = sizeof(s->buffer_start);
  757. s->read_from_callbacks = 1;
  758. s->callback_already_read = 0;
  759. s->img_buffer = s->img_buffer_original = s->buffer_start;
  760. stbi__refill_buffer(s);
  761. s->img_buffer_original_end = s->img_buffer_end;
  762. }
  763. #ifndef STBI_NO_STDIO
  764. static int stbi__stdio_read(void* user, char* data, int size) {
  765. return (int)fread(data, 1, size, (FILE*)user);
  766. }
  767. static void stbi__stdio_skip(void* user, int n) {
  768. int ch;
  769. fseek((FILE*)user, n, SEEK_CUR);
  770. ch = fgetc((FILE*)user); /* have to read a byte to reset feof()'s flag */
  771. if (ch != EOF) {
  772. ungetc(ch, (FILE*)user); /* push byte back onto stream if valid. */
  773. }
  774. }
  775. static int stbi__stdio_eof(void* user) {
  776. return feof((FILE*)user) || ferror((FILE*)user);
  777. }
  778. static stbi_io_callbacks stbi__stdio_callbacks = {
  779. stbi__stdio_read,
  780. stbi__stdio_skip,
  781. stbi__stdio_eof,
  782. };
  783. static void stbi__start_file(stbi__context* s, FILE* f) {
  784. stbi__start_callbacks(s, &stbi__stdio_callbacks, (void*)f);
  785. }
  786. // static void stop_file(stbi__context *s) { }
  787. #endif // !STBI_NO_STDIO
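// Illustrative sketch: user code can supply the same three callbacks to decode
// from any stream-like source; my_read/my_skip/my_eof are hypothetical
// functions matching the stbi_io_callbacks signatures:
//
//    stbi_io_callbacks cb = { my_read, my_skip, my_eof };
//    int w, h, n;
//    unsigned char *data = stbi_load_from_callbacks(&cb, my_stream, &w, &h, &n, 0);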
  788. static void stbi__rewind(stbi__context* s) {
  789. // conceptually rewind SHOULD rewind to the beginning of the stream,
  790. // but we just rewind to the beginning of the initial buffer, because
  791. // we only use it after doing 'test', which only ever looks at at most 92 bytes
  792. s->img_buffer = s->img_buffer_original;
  793. s->img_buffer_end = s->img_buffer_original_end;
  794. }
  795. enum { STBI_ORDER_RGB, STBI_ORDER_BGR };
  796. typedef struct {
  797. int bits_per_channel;
  798. int num_channels;
  799. int channel_order;
  800. } stbi__result_info;
  801. #ifndef STBI_NO_JPEG
  802. static int stbi__jpeg_test(stbi__context* s);
  803. static void* stbi__jpeg_load(
  804. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  805. stbi__result_info* ri);
  806. static int stbi__jpeg_info(stbi__context* s, int* x, int* y, int* comp);
  807. #endif
  808. #ifndef STBI_NO_PNG
  809. static int stbi__png_test(stbi__context* s);
  810. static void* stbi__png_load(
  811. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  812. stbi__result_info* ri);
  813. static int stbi__png_info(stbi__context* s, int* x, int* y, int* comp);
  814. static int stbi__png_is16(stbi__context* s);
  815. #endif
  816. #ifndef STBI_NO_BMP
  817. static int stbi__bmp_test(stbi__context* s);
  818. static void* stbi__bmp_load(
  819. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  820. stbi__result_info* ri);
  821. static int stbi__bmp_info(stbi__context* s, int* x, int* y, int* comp);
  822. #endif
  823. #ifndef STBI_NO_TGA
  824. static int stbi__tga_test(stbi__context* s);
  825. static void* stbi__tga_load(
  826. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  827. stbi__result_info* ri);
  828. static int stbi__tga_info(stbi__context* s, int* x, int* y, int* comp);
  829. #endif
  830. #ifndef STBI_NO_PSD
  831. static int stbi__psd_test(stbi__context* s);
  832. static void* stbi__psd_load(
  833. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  834. stbi__result_info* ri, int bpc);
  835. static int stbi__psd_info(stbi__context* s, int* x, int* y, int* comp);
  836. static int stbi__psd_is16(stbi__context* s);
  837. #endif
  838. #ifndef STBI_NO_HDR
  839. static int stbi__hdr_test(stbi__context* s);
  840. static float* stbi__hdr_load(
  841. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  842. stbi__result_info* ri);
  843. static int stbi__hdr_info(stbi__context* s, int* x, int* y, int* comp);
  844. #endif
  845. #ifndef STBI_NO_PIC
  846. static int stbi__pic_test(stbi__context* s);
  847. static void* stbi__pic_load(
  848. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  849. stbi__result_info* ri);
  850. static int stbi__pic_info(stbi__context* s, int* x, int* y, int* comp);
  851. #endif
  852. #ifndef STBI_NO_GIF
  853. static int stbi__gif_test(stbi__context* s);
  854. static void* stbi__gif_load(
  855. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  856. stbi__result_info* ri);
  857. static void* stbi__load_gif_main(
  858. stbi__context* s, int** delays, int* x, int* y, int* z, int* comp,
  859. int req_comp);
  860. static int stbi__gif_info(stbi__context* s, int* x, int* y, int* comp);
  861. #endif
  862. #ifndef STBI_NO_PNM
  863. static int stbi__pnm_test(stbi__context* s);
  864. static void* stbi__pnm_load(
  865. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  866. stbi__result_info* ri);
  867. static int stbi__pnm_info(stbi__context* s, int* x, int* y, int* comp);
  868. static int stbi__pnm_is16(stbi__context* s);
  869. #endif
  870. static
  871. #ifdef STBI_THREAD_LOCAL
  872. STBI_THREAD_LOCAL
  873. #endif
  874. const char* stbi__g_failure_reason;
  875. STBIDEF const char* stbi_failure_reason(void) {
  876. return stbi__g_failure_reason;
  877. }
  878. #ifndef STBI_NO_FAILURE_STRINGS
  879. static int stbi__err(const char* str) {
  880. stbi__g_failure_reason = str;
  881. return 0;
  882. }
  883. #endif
  884. static void* stbi__malloc(size_t size) {
  885. return STBI_MALLOC(size);
  886. }
  887. // stb_image uses ints pervasively, including for offset calculations.
  888. // therefore the largest decoded image size we can support with the
  889. // current code, even on 64-bit targets, is INT_MAX. this is not a
  890. // significant limitation for the intended use case.
  891. //
  892. // we do, however, need to make sure our size calculations don't
  893. // overflow. hence a few helper functions for size calculations that
  894. // multiply integers together, making sure that they're non-negative
  895. // and no overflow occurs.
896. // returns 1 if the sum is valid, 0 on overflow.
  897. // negative terms are considered invalid.
  898. static int stbi__addsizes_valid(int a, int b) {
  899. if (b < 0)
  900. return 0;
  901. // now 0 <= b <= INT_MAX, hence also
902. // 0 <= INT_MAX - b <= INT_MAX.
  903. // And "a + b <= INT_MAX" (which might overflow) is the
  904. // same as a <= INT_MAX - b (no overflow)
  905. return a <= INT_MAX - b;
  906. }
  907. // returns 1 if the product is valid, 0 on overflow.
  908. // negative factors are considered invalid.
  909. static int stbi__mul2sizes_valid(int a, int b) {
  910. if (a < 0 || b < 0)
  911. return 0;
  912. if (b == 0)
  913. return 1; // mul-by-0 is always safe
  914. // portable way to check for no overflows in a*b
  915. return a <= INT_MAX / b;
  916. }
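// e.g. with the usual 32-bit int, stbi__mul2sizes_valid(70000, 70000) returns 0
// because 70000*70000 (~4.9e9) exceeds INT_MAX, so callers refuse the allocation
// instead of computing a wrapped-around size.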
  917. #if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || \
  918. !defined(STBI_NO_HDR)
  919. // returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow
  920. static int stbi__mad2sizes_valid(int a, int b, int add) {
  921. return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a * b, add);
  922. }
  923. #endif
  924. // returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow
  925. static int stbi__mad3sizes_valid(int a, int b, int c, int add) {
  926. return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a * b, c) &&
  927. stbi__addsizes_valid(a * b * c, add);
  928. }
  929. // returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow
  930. #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM)
  931. static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) {
  932. return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a * b, c) &&
  933. stbi__mul2sizes_valid(a * b * c, d) &&
  934. stbi__addsizes_valid(a * b * c * d, add);
  935. }
  936. #endif
  937. #if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || \
  938. !defined(STBI_NO_HDR)
  939. // mallocs with size overflow checking
  940. static void* stbi__malloc_mad2(int a, int b, int add) {
  941. if (!stbi__mad2sizes_valid(a, b, add))
  942. return NULL;
  943. return stbi__malloc(a * b + add);
  944. }
  945. #endif
  946. static void* stbi__malloc_mad3(int a, int b, int c, int add) {
  947. if (!stbi__mad3sizes_valid(a, b, c, add))
  948. return NULL;
  949. return stbi__malloc(a * b * c + add);
  950. }
  951. #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM)
  952. static void* stbi__malloc_mad4(int a, int b, int c, int d, int add) {
  953. if (!stbi__mad4sizes_valid(a, b, c, d, add))
  954. return NULL;
  955. return stbi__malloc(a * b * c * d + add);
  956. }
  957. #endif
  958. // stbi__err - error
  959. // stbi__errpf - error returning pointer to float
  960. // stbi__errpuc - error returning pointer to unsigned char
  961. #ifdef STBI_NO_FAILURE_STRINGS
  962. #define stbi__err(x, y) 0
  963. #elif defined(STBI_FAILURE_USERMSG)
  964. #define stbi__err(x, y) stbi__err(y)
  965. #else
  966. #define stbi__err(x, y) stbi__err(x)
  967. #endif
  968. #define stbi__errpf(x, y) ((float*)(size_t)(stbi__err(x, y) ? NULL : NULL))
  969. #define stbi__errpuc(x, y) ((unsigned char*)(size_t)(stbi__err(x, y) ? NULL : NULL))
  970. STBIDEF void stbi_image_free(void* retval_from_stbi_load) {
  971. STBI_FREE(retval_from_stbi_load);
  972. }
  973. #ifndef STBI_NO_LINEAR
  974. static float* stbi__ldr_to_hdr(stbi_uc* data, int x, int y, int comp);
  975. #endif
  976. #ifndef STBI_NO_HDR
  977. static stbi_uc* stbi__hdr_to_ldr(float* data, int x, int y, int comp);
  978. #endif
  979. static int stbi__vertically_flip_on_load_global = 0;
  980. STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) {
  981. stbi__vertically_flip_on_load_global = flag_true_if_should_flip;
  982. }
  983. #ifndef STBI_THREAD_LOCAL
  984. #define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global
  985. #else
  986. static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local,
  987. stbi__vertically_flip_on_load_set;
  988. STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) {
  989. stbi__vertically_flip_on_load_local = flag_true_if_should_flip;
  990. stbi__vertically_flip_on_load_set = 1;
  991. }
  992. #define stbi__vertically_flip_on_load \
  993. (stbi__vertically_flip_on_load_set ? stbi__vertically_flip_on_load_local \
  994. : stbi__vertically_flip_on_load_global)
  995. #endif // STBI_THREAD_LOCAL
  996. static void* stbi__load_main(
  997. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  998. stbi__result_info* ri, int bpc) {
  999. memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields
  1000. ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed
  1001. ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but
  1002. // this is here so we can add BGR order
  1003. ri->num_channels = 0;
1004. // test the formats with a very explicit header first (at least a FOURCC
1005. // or a distinctive magic number)
  1006. #ifndef STBI_NO_PNG
  1007. if (stbi__png_test(s))
  1008. return stbi__png_load(s, x, y, comp, req_comp, ri);
  1009. #endif
  1010. #ifndef STBI_NO_BMP
  1011. if (stbi__bmp_test(s))
  1012. return stbi__bmp_load(s, x, y, comp, req_comp, ri);
  1013. #endif
  1014. #ifndef STBI_NO_GIF
  1015. if (stbi__gif_test(s))
  1016. return stbi__gif_load(s, x, y, comp, req_comp, ri);
  1017. #endif
  1018. #ifndef STBI_NO_PSD
  1019. if (stbi__psd_test(s))
  1020. return stbi__psd_load(s, x, y, comp, req_comp, ri, bpc);
  1021. #else
  1022. STBI_NOTUSED(bpc);
  1023. #endif
  1024. #ifndef STBI_NO_PIC
  1025. if (stbi__pic_test(s))
  1026. return stbi__pic_load(s, x, y, comp, req_comp, ri);
  1027. #endif
  1028. // then the formats that can end up attempting to load with just 1 or 2
  1029. // bytes matching expectations; these are prone to false positives, so
  1030. // try them later
  1031. #ifndef STBI_NO_JPEG
  1032. if (stbi__jpeg_test(s))
  1033. return stbi__jpeg_load(s, x, y, comp, req_comp, ri);
  1034. #endif
  1035. #ifndef STBI_NO_PNM
  1036. if (stbi__pnm_test(s))
  1037. return stbi__pnm_load(s, x, y, comp, req_comp, ri);
  1038. #endif
  1039. #ifndef STBI_NO_HDR
  1040. if (stbi__hdr_test(s)) {
  1041. float* hdr = stbi__hdr_load(s, x, y, comp, req_comp, ri);
  1042. return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp);
  1043. }
  1044. #endif
  1045. #ifndef STBI_NO_TGA
  1046. // test tga last because it's a crappy test!
  1047. if (stbi__tga_test(s))
  1048. return stbi__tga_load(s, x, y, comp, req_comp, ri);
  1049. #endif
  1050. return stbi__errpuc(
  1051. "unknown image type", "Image not of any known type, or corrupt");
  1052. }
  1053. static stbi_uc* stbi__convert_16_to_8(stbi__uint16* orig, int w, int h, int channels) {
  1054. int i;
  1055. int img_len = w * h * channels;
  1056. stbi_uc* reduced;
  1057. reduced = (stbi_uc*)stbi__malloc(img_len);
  1058. if (reduced == NULL)
  1059. return stbi__errpuc("outofmem", "Out of memory");
  1060. for (i = 0; i < img_len; ++i)
  1061. reduced[i] =
1062. (stbi_uc)((orig[i] >> 8) & 0xFF); // top byte of each 16-bit value is a
1063. // sufficient approx of 16->8 bit scaling
  1064. STBI_FREE(orig);
  1065. return reduced;
  1066. }
  1067. static stbi__uint16* stbi__convert_8_to_16(stbi_uc* orig, int w, int h, int channels) {
  1068. int i;
  1069. int img_len = w * h * channels;
  1070. stbi__uint16* enlarged;
  1071. enlarged = (stbi__uint16*)stbi__malloc(img_len * 2);
  1072. if (enlarged == NULL)
  1073. return (stbi__uint16*)stbi__errpuc("outofmem", "Out of memory");
  1074. for (i = 0; i < img_len; ++i)
  1075. enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high
  1076. // and low byte, maps
  1077. // 0->0, 255->0xffff
  1078. STBI_FREE(orig);
  1079. return enlarged;
  1080. }
  1081. static void stbi__vertical_flip(void* image, int w, int h, int bytes_per_pixel) {
  1082. int row;
  1083. size_t bytes_per_row = (size_t)w * bytes_per_pixel;
  1084. stbi_uc temp[2048];
  1085. stbi_uc* bytes = (stbi_uc*)image;
  1086. for (row = 0; row < (h >> 1); row++) {
  1087. stbi_uc* row0 = bytes + row * bytes_per_row;
  1088. stbi_uc* row1 = bytes + (h - row - 1) * bytes_per_row;
  1089. // swap row0 with row1
  1090. size_t bytes_left = bytes_per_row;
  1091. while (bytes_left) {
  1092. size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp);
  1093. memcpy(temp, row0, bytes_copy);
  1094. memcpy(row0, row1, bytes_copy);
  1095. memcpy(row1, temp, bytes_copy);
  1096. row0 += bytes_copy;
  1097. row1 += bytes_copy;
  1098. bytes_left -= bytes_copy;
  1099. }
  1100. }
  1101. }
  1102. #ifndef STBI_NO_GIF
  1103. static void stbi__vertical_flip_slices(
  1104. void* image, int w, int h, int z, int bytes_per_pixel) {
  1105. int slice;
  1106. int slice_size = w * h * bytes_per_pixel;
  1107. stbi_uc* bytes = (stbi_uc*)image;
  1108. for (slice = 0; slice < z; ++slice) {
  1109. stbi__vertical_flip(bytes, w, h, bytes_per_pixel);
  1110. bytes += slice_size;
  1111. }
  1112. }
  1113. #endif
  1114. static unsigned char* stbi__load_and_postprocess_8bit(
  1115. stbi__context* s, int* x, int* y, int* comp, int req_comp) {
  1116. stbi__result_info ri;
  1117. void* result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8);
  1118. if (result == NULL)
  1119. return NULL;
  1120. // it is the responsibility of the loaders to make sure we get either 8 or 16 bit.
  1121. STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16);
  1122. if (ri.bits_per_channel != 8) {
  1123. result = stbi__convert_16_to_8(
  1124. (stbi__uint16*)result, *x, *y, req_comp == 0 ? *comp : req_comp);
  1125. ri.bits_per_channel = 8;
  1126. }
  1127. // @TODO: move stbi__convert_format to here
  1128. if (stbi__vertically_flip_on_load) {
  1129. int channels = req_comp ? req_comp : *comp;
  1130. stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc));
  1131. }
  1132. return (unsigned char*)result;
  1133. }
  1134. static stbi__uint16* stbi__load_and_postprocess_16bit(
  1135. stbi__context* s, int* x, int* y, int* comp, int req_comp) {
  1136. stbi__result_info ri;
  1137. void* result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16);
  1138. if (result == NULL)
  1139. return NULL;
  1140. // it is the responsibility of the loaders to make sure we get either 8 or 16 bit.
  1141. STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16);
  1142. if (ri.bits_per_channel != 16) {
  1143. result = stbi__convert_8_to_16(
  1144. (stbi_uc*)result, *x, *y, req_comp == 0 ? *comp : req_comp);
  1145. ri.bits_per_channel = 16;
  1146. }
  1147. // @TODO: move stbi__convert_format16 to here
  1148. // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep
  1149. // more precision
  1150. if (stbi__vertically_flip_on_load) {
  1151. int channels = req_comp ? req_comp : *comp;
  1152. stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16));
  1153. }
  1154. return (stbi__uint16*)result;
  1155. }
  1156. #if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR)
  1157. static void stbi__float_postprocess(
  1158. float* result, int* x, int* y, int* comp, int req_comp) {
  1159. if (stbi__vertically_flip_on_load && result != NULL) {
  1160. int channels = req_comp ? req_comp : *comp;
  1161. stbi__vertical_flip(result, *x, *y, channels * sizeof(float));
  1162. }
  1163. }
  1164. #endif
  1165. #ifndef STBI_NO_STDIO
  1166. #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8)
  1167. STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(
  1168. unsigned int cp, unsigned long flags, const char* str, int cbmb,
  1169. wchar_t* widestr, int cchwide);
  1170. STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(
  1171. unsigned int cp, unsigned long flags, const wchar_t* widestr, int cchwide,
  1172. char* str, int cbmb, const char* defchar, int* used_default);
  1173. #endif
  1174. #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8)
  1175. STBIDEF int stbi_convert_wchar_to_utf8(
  1176. char* buffer, size_t bufferlen, const wchar_t* input) {
  1177. return WideCharToMultiByte(
  1178. 65001 /* UTF8 */, 0, input, -1, buffer, (int)bufferlen, NULL, NULL);
  1179. }
  1180. #endif
  1181. static FILE* stbi__fopen(char const* filename, char const* mode) {
  1182. FILE* f;
  1183. #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8)
  1184. wchar_t wMode[64];
  1185. wchar_t wFilename[1024];
  1186. if (0 == MultiByteToWideChar(
  1187. 65001 /* UTF8 */, 0, filename, -1, wFilename,
  1188. sizeof(wFilename) / sizeof(*wFilename)))
  1189. return 0;
  1190. if (0 ==
  1191. MultiByteToWideChar(
  1192. 65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode) / sizeof(*wMode)))
  1193. return 0;
  1194. #if defined(_MSC_VER) && _MSC_VER >= 1400
  1195. if (0 != _wfopen_s(&f, wFilename, wMode))
  1196. f = 0;
  1197. #else
  1198. f = _wfopen(wFilename, wMode);
  1199. #endif
  1200. #elif defined(_MSC_VER) && _MSC_VER >= 1400
  1201. if (0 != fopen_s(&f, filename, mode))
  1202. f = 0;
  1203. #else
  1204. f = fopen(filename, mode);
  1205. #endif
  1206. return f;
  1207. }
  1208. STBIDEF stbi_uc* stbi_load(
  1209. char const* filename, int* x, int* y, int* comp, int req_comp) {
  1210. FILE* f = stbi__fopen(filename, "rb");
  1211. unsigned char* result;
  1212. if (!f)
  1213. return stbi__errpuc("can't fopen", "Unable to open file");
  1214. result = stbi_load_from_file(f, x, y, comp, req_comp);
  1215. fclose(f);
  1216. return result;
  1217. }
  1218. STBIDEF stbi_uc* stbi_load_from_file(FILE* f, int* x, int* y, int* comp, int req_comp) {
  1219. unsigned char* result;
  1220. stbi__context s;
  1221. stbi__start_file(&s, f);
  1222. result = stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp);
  1223. if (result) {
  1224. // need to 'unget' all the characters in the IO buffer
  1225. fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR);
  1226. }
  1227. return result;
  1228. }
  1229. STBIDEF stbi__uint16* stbi_load_from_file_16(
  1230. FILE* f, int* x, int* y, int* comp, int req_comp) {
  1231. stbi__uint16* result;
  1232. stbi__context s;
  1233. stbi__start_file(&s, f);
  1234. result = stbi__load_and_postprocess_16bit(&s, x, y, comp, req_comp);
  1235. if (result) {
  1236. // need to 'unget' all the characters in the IO buffer
  1237. fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR);
  1238. }
  1239. return result;
  1240. }
  1241. STBIDEF stbi_us* stbi_load_16(
  1242. char const* filename, int* x, int* y, int* comp, int req_comp) {
  1243. FILE* f = stbi__fopen(filename, "rb");
  1244. stbi__uint16* result;
  1245. if (!f)
  1246. return (stbi_us*)stbi__errpuc("can't fopen", "Unable to open file");
  1247. result = stbi_load_from_file_16(f, x, y, comp, req_comp);
  1248. fclose(f);
  1249. return result;
  1250. }
  1251. #endif //! STBI_NO_STDIO
  1252. STBIDEF stbi_us* stbi_load_16_from_memory(
  1253. stbi_uc const* buffer, int len, int* x, int* y, int* channels_in_file,
  1254. int desired_channels) {
  1255. stbi__context s;
  1256. stbi__start_mem(&s, buffer, len);
  1257. return stbi__load_and_postprocess_16bit(
  1258. &s, x, y, channels_in_file, desired_channels);
  1259. }
  1260. STBIDEF stbi_us* stbi_load_16_from_callbacks(
  1261. stbi_io_callbacks const* clbk, void* user, int* x, int* y,
  1262. int* channels_in_file, int desired_channels) {
  1263. stbi__context s;
  1264. stbi__start_callbacks(&s, (stbi_io_callbacks*)clbk, user);
  1265. return stbi__load_and_postprocess_16bit(
  1266. &s, x, y, channels_in_file, desired_channels);
  1267. }
  1268. STBIDEF stbi_uc* stbi_load_from_memory(
  1269. stbi_uc const* buffer, int len, int* x, int* y, int* comp, int req_comp) {
  1270. stbi__context s;
  1271. stbi__start_mem(&s, buffer, len);
  1272. return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp);
  1273. }
  1274. STBIDEF stbi_uc* stbi_load_from_callbacks(
  1275. stbi_io_callbacks const* clbk, void* user, int* x, int* y, int* comp,
  1276. int req_comp) {
  1277. stbi__context s;
  1278. stbi__start_callbacks(&s, (stbi_io_callbacks*)clbk, user);
  1279. return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp);
  1280. }
  1281. #ifndef STBI_NO_GIF
  1282. STBIDEF stbi_uc* stbi_load_gif_from_memory(
  1283. stbi_uc const* buffer, int len, int** delays, int* x, int* y, int* z, int* comp,
  1284. int req_comp) {
  1285. unsigned char* result;
  1286. stbi__context s;
  1287. stbi__start_mem(&s, buffer, len);
  1288. result = (unsigned char*)stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp);
  1289. if (stbi__vertically_flip_on_load) {
  1290. stbi__vertical_flip_slices(result, *x, *y, *z, *comp);
  1291. }
  1292. return result;
  1293. }
  1294. #endif
  1295. #ifndef STBI_NO_LINEAR
  1296. static float* stbi__loadf_main(
  1297. stbi__context* s, int* x, int* y, int* comp, int req_comp) {
  1298. unsigned char* data;
  1299. #ifndef STBI_NO_HDR
  1300. if (stbi__hdr_test(s)) {
  1301. stbi__result_info ri;
  1302. float* hdr_data = stbi__hdr_load(s, x, y, comp, req_comp, &ri);
  1303. if (hdr_data)
  1304. stbi__float_postprocess(hdr_data, x, y, comp, req_comp);
  1305. return hdr_data;
  1306. }
  1307. #endif
  1308. data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp);
  1309. if (data)
  1310. return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
  1311. return stbi__errpf("unknown image type", "Image not of any known type, or corrupt");
  1312. }
  1313. STBIDEF float* stbi_loadf_from_memory(
  1314. stbi_uc const* buffer, int len, int* x, int* y, int* comp, int req_comp) {
  1315. stbi__context s;
  1316. stbi__start_mem(&s, buffer, len);
  1317. return stbi__loadf_main(&s, x, y, comp, req_comp);
  1318. }
  1319. STBIDEF float* stbi_loadf_from_callbacks(
  1320. stbi_io_callbacks const* clbk, void* user, int* x, int* y, int* comp,
  1321. int req_comp) {
  1322. stbi__context s;
  1323. stbi__start_callbacks(&s, (stbi_io_callbacks*)clbk, user);
  1324. return stbi__loadf_main(&s, x, y, comp, req_comp);
  1325. }
  1326. #ifndef STBI_NO_STDIO
  1327. STBIDEF float* stbi_loadf(
  1328. char const* filename, int* x, int* y, int* comp, int req_comp) {
  1329. float* result;
  1330. FILE* f = stbi__fopen(filename, "rb");
  1331. if (!f)
  1332. return stbi__errpf("can't fopen", "Unable to open file");
  1333. result = stbi_loadf_from_file(f, x, y, comp, req_comp);
  1334. fclose(f);
  1335. return result;
  1336. }
  1337. STBIDEF float* stbi_loadf_from_file(FILE* f, int* x, int* y, int* comp, int req_comp) {
  1338. stbi__context s;
  1339. stbi__start_file(&s, f);
  1340. return stbi__loadf_main(&s, x, y, comp, req_comp);
  1341. }
  1342. #endif // !STBI_NO_STDIO
  1343. #endif // !STBI_NO_LINEAR
1344. // these is-hdr-or-not queries are defined independent of whether STBI_NO_HDR
1345. // is defined, for API simplicity; if STBI_NO_HDR is defined, they always
1346. // report false!
  1347. STBIDEF int stbi_is_hdr_from_memory(stbi_uc const* buffer, int len) {
  1348. #ifndef STBI_NO_HDR
  1349. stbi__context s;
  1350. stbi__start_mem(&s, buffer, len);
  1351. return stbi__hdr_test(&s);
  1352. #else
  1353. STBI_NOTUSED(buffer);
  1354. STBI_NOTUSED(len);
  1355. return 0;
  1356. #endif
  1357. }
  1358. #ifndef STBI_NO_STDIO
  1359. STBIDEF int stbi_is_hdr(char const* filename) {
  1360. FILE* f = stbi__fopen(filename, "rb");
  1361. int result = 0;
  1362. if (f) {
  1363. result = stbi_is_hdr_from_file(f);
  1364. fclose(f);
  1365. }
  1366. return result;
  1367. }
  1368. STBIDEF int stbi_is_hdr_from_file(FILE* f) {
  1369. #ifndef STBI_NO_HDR
  1370. long pos = ftell(f);
  1371. int res;
  1372. stbi__context s;
  1373. stbi__start_file(&s, f);
  1374. res = stbi__hdr_test(&s);
  1375. fseek(f, pos, SEEK_SET);
  1376. return res;
  1377. #else
  1378. STBI_NOTUSED(f);
  1379. return 0;
  1380. #endif
  1381. }
  1382. #endif // !STBI_NO_STDIO
  1383. STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const* clbk, void* user) {
  1384. #ifndef STBI_NO_HDR
  1385. stbi__context s;
  1386. stbi__start_callbacks(&s, (stbi_io_callbacks*)clbk, user);
  1387. return stbi__hdr_test(&s);
  1388. #else
  1389. STBI_NOTUSED(clbk);
  1390. STBI_NOTUSED(user);
  1391. return 0;
  1392. #endif
  1393. }
  1394. #ifndef STBI_NO_LINEAR
  1395. static float stbi__l2h_gamma = 2.2f, stbi__l2h_scale = 1.0f;
  1396. STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) {
  1397. stbi__l2h_gamma = gamma;
  1398. }
  1399. STBIDEF void stbi_ldr_to_hdr_scale(float scale) {
  1400. stbi__l2h_scale = scale;
  1401. }
  1402. #endif
  1403. static float stbi__h2l_gamma_i = 1.0f / 2.2f, stbi__h2l_scale_i = 1.0f;
  1404. STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) {
  1405. stbi__h2l_gamma_i = 1 / gamma;
  1406. }
  1407. STBIDEF void stbi_hdr_to_ldr_scale(float scale) {
  1408. stbi__h2l_scale_i = 1 / scale;
  1409. }
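// Illustrative sketch: if your 8-bit source art is authored in linear space
// rather than ~2.2 gamma, you might disable the gamma step before calling
// the float-loading interface:
//
//    stbi_ldr_to_hdr_gamma(1.0f);
//    stbi_ldr_to_hdr_scale(1.0f);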
  1410. //////////////////////////////////////////////////////////////////////////////
  1411. //
  1412. // Common code used by all image loaders
  1413. //
  1414. enum { STBI__SCAN_load = 0, STBI__SCAN_type, STBI__SCAN_header };
  1415. static void stbi__refill_buffer(stbi__context* s) {
  1416. int n = (s->io.read)(s->io_user_data, (char*)s->buffer_start, s->buflen);
  1417. s->callback_already_read += (int)(s->img_buffer - s->img_buffer_original);
  1418. if (n == 0) {
  1419. // at end of file, treat same as if from memory, but need to handle case
  1420. // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file
  1421. s->read_from_callbacks = 0;
  1422. s->img_buffer = s->buffer_start;
  1423. s->img_buffer_end = s->buffer_start + 1;
  1424. *s->img_buffer = 0;
  1425. } else {
  1426. s->img_buffer = s->buffer_start;
  1427. s->img_buffer_end = s->buffer_start + n;
  1428. }
  1429. }
  1430. stbi_inline static stbi_uc stbi__get8(stbi__context* s) {
  1431. if (s->img_buffer < s->img_buffer_end)
  1432. return *s->img_buffer++;
  1433. if (s->read_from_callbacks) {
  1434. stbi__refill_buffer(s);
  1435. return *s->img_buffer++;
  1436. }
  1437. return 0;
  1438. }
  1439. #if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && \
  1440. defined(STBI_NO_PNM)
  1441. // nothing
  1442. #else
  1443. stbi_inline static int stbi__at_eof(stbi__context* s) {
  1444. if (s->io.read) {
  1445. if (!(s->io.eof)(s->io_user_data))
  1446. return 0;
  1447. // if feof() is true, check if buffer = end
  1448. // special case: we've only got the special 0 character at the end
  1449. if (s->read_from_callbacks == 0)
  1450. return 1;
  1451. }
  1452. return s->img_buffer >= s->img_buffer_end;
  1453. }
  1454. #endif
  1455. #if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && \
  1456. defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && \
  1457. defined(STBI_NO_PIC)
  1458. // nothing
  1459. #else
  1460. static void stbi__skip(stbi__context* s, int n) {
  1461. if (n == 0)
  1462. return; // already there!
  1463. if (n < 0) {
  1464. s->img_buffer = s->img_buffer_end;
  1465. return;
  1466. }
  1467. if (s->io.read) {
  1468. int blen = (int)(s->img_buffer_end - s->img_buffer);
  1469. if (blen < n) {
  1470. s->img_buffer = s->img_buffer_end;
  1471. (s->io.skip)(s->io_user_data, n - blen);
  1472. return;
  1473. }
  1474. }
  1475. s->img_buffer += n;
  1476. }
  1477. #endif
  1478. #if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && \
  1479. defined(STBI_NO_PNM)
  1480. // nothing
  1481. #else
  1482. static int stbi__getn(stbi__context* s, stbi_uc* buffer, int n) {
  1483. if (s->io.read) {
  1484. int blen = (int)(s->img_buffer_end - s->img_buffer);
  1485. if (blen < n) {
  1486. int res, count;
  1487. memcpy(buffer, s->img_buffer, blen);
  1488. count = (s->io.read)(s->io_user_data, (char*)buffer + blen, n - blen);
  1489. res = (count == (n - blen));
  1490. s->img_buffer = s->img_buffer_end;
  1491. return res;
  1492. }
  1493. }
  1494. if (s->img_buffer + n <= s->img_buffer_end) {
  1495. memcpy(buffer, s->img_buffer, n);
  1496. s->img_buffer += n;
  1497. return 1;
  1498. } else
  1499. return 0;
  1500. }
  1501. #endif
  1502. #if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && \
  1503. defined(STBI_NO_PIC)
  1504. // nothing
  1505. #else
  1506. static int stbi__get16be(stbi__context* s) {
  1507. int z = stbi__get8(s);
  1508. return (z << 8) + stbi__get8(s);
  1509. }
  1510. #endif
  1511. #if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC)
  1512. // nothing
  1513. #else
  1514. static stbi__uint32 stbi__get32be(stbi__context* s) {
  1515. stbi__uint32 z = stbi__get16be(s);
  1516. return (z << 16) + stbi__get16be(s);
  1517. }
  1518. #endif
  1519. #if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF)
  1520. // nothing
  1521. #else
  1522. static int stbi__get16le(stbi__context* s) {
  1523. int z = stbi__get8(s);
  1524. return z + (stbi__get8(s) << 8);
  1525. }
  1526. #endif
  1527. #ifndef STBI_NO_BMP
  1528. static stbi__uint32 stbi__get32le(stbi__context* s) {
  1529. stbi__uint32 z = stbi__get16le(s);
  1530. z += (stbi__uint32)stbi__get16le(s) << 16;
  1531. return z;
  1532. }
  1533. #endif
  1534. #define STBI__BYTECAST(x) ((stbi_uc)((x)&255)) // truncate int to byte without warnings
  1535. #if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && \
  1536. defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && \
  1537. defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
  1538. // nothing
  1539. #else
  1540. //////////////////////////////////////////////////////////////////////////////
  1541. //
  1542. // generic converter from built-in img_n to req_comp
  1543. // individual types do this automatically as much as possible (e.g. jpeg
  1544. // does all cases internally since it needs to colorspace convert anyway,
1545. // and it never has alpha, so very few cases). png can automatically
  1546. // interleave an alpha=255 channel, but falls back to this for other cases
  1547. //
  1548. // assume data buffer is malloced, so malloc a new one and free that one
  1549. // only failure mode is malloc failing
  1550. static stbi_uc stbi__compute_y(int r, int g, int b) {
  1551. return (stbi_uc)(((r * 77) + (g * 150) + (29 * b)) >> 8);
  1552. }
  1553. #endif
  1554. #if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && \
  1555. defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && \
  1556. defined(STBI_NO_PNM)
  1557. // nothing
  1558. #else
  1559. static unsigned char* stbi__convert_format(
  1560. unsigned char* data, int img_n, int req_comp, unsigned int x, unsigned int y) {
  1561. int i, j;
  1562. unsigned char* good;
  1563. if (req_comp == img_n)
  1564. return data;
  1565. STBI_ASSERT(req_comp >= 1 && req_comp <= 4);
  1566. good = (unsigned char*)stbi__malloc_mad3(req_comp, x, y, 0);
  1567. if (good == NULL) {
  1568. STBI_FREE(data);
  1569. return stbi__errpuc("outofmem", "Out of memory");
  1570. }
  1571. for (j = 0; j < (int)y; ++j) {
  1572. unsigned char* src = data + j * x * img_n;
  1573. unsigned char* dest = good + j * x * req_comp;
  1574. #define STBI__COMBO(a, b) ((a)*8 + (b))
  1575. #define STBI__CASE(a, b) \
  1576. case STBI__COMBO(a, b): \
  1577. for (i = x - 1; i >= 0; --i, src += a, dest += b)
  1578. // convert source image with img_n components to one with req_comp components;
  1579. // avoid switch per pixel, so use switch per scanline and massive macros
  1580. switch (STBI__COMBO(img_n, req_comp)) {
  1581. STBI__CASE(1, 2) {
  1582. dest[0] = src[0];
  1583. dest[1] = 255;
  1584. }
  1585. break;
  1586. STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; }
  1587. break;
  1588. STBI__CASE(1, 4) {
  1589. dest[0] = dest[1] = dest[2] = src[0];
  1590. dest[3] = 255;
  1591. }
  1592. break;
  1593. STBI__CASE(2, 1) { dest[0] = src[0]; }
  1594. break;
  1595. STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; }
  1596. break;
  1597. STBI__CASE(2, 4) {
  1598. dest[0] = dest[1] = dest[2] = src[0];
  1599. dest[3] = src[1];
  1600. }
  1601. break;
  1602. STBI__CASE(3, 4) {
  1603. dest[0] = src[0];
  1604. dest[1] = src[1];
  1605. dest[2] = src[2];
  1606. dest[3] = 255;
  1607. }
  1608. break;
  1609. STBI__CASE(3, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); }
  1610. break;
  1611. STBI__CASE(3, 2) {
  1612. dest[0] = stbi__compute_y(src[0], src[1], src[2]);
  1613. dest[1] = 255;
  1614. }
  1615. break;
  1616. STBI__CASE(4, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); }
  1617. break;
  1618. STBI__CASE(4, 2) {
  1619. dest[0] = stbi__compute_y(src[0], src[1], src[2]);
  1620. dest[1] = src[3];
  1621. }
  1622. break;
  1623. STBI__CASE(4, 3) {
  1624. dest[0] = src[0];
  1625. dest[1] = src[1];
  1626. dest[2] = src[2];
  1627. }
  1628. break;
  1629. default:
  1630. STBI_ASSERT(0);
  1631. STBI_FREE(data);
  1632. STBI_FREE(good);
  1633. return stbi__errpuc("unsupported", "Unsupported format conversion");
  1634. }
  1635. #undef STBI__CASE
  1636. }
  1637. STBI_FREE(data);
  1638. return good;
  1639. }
  1640. #endif
  1641. #if defined(STBI_NO_PNG) && defined(STBI_NO_PSD)
  1642. // nothing
  1643. #else
  1644. static stbi__uint16 stbi__compute_y_16(int r, int g, int b) {
  1645. return (stbi__uint16)(((r * 77) + (g * 150) + (29 * b)) >> 8);
  1646. }
  1647. #endif
  1648. #if defined(STBI_NO_PNG) && defined(STBI_NO_PSD)
  1649. // nothing
  1650. #else
  1651. static stbi__uint16* stbi__convert_format16(
  1652. stbi__uint16* data, int img_n, int req_comp, unsigned int x, unsigned int y) {
  1653. int i, j;
  1654. stbi__uint16* good;
  1655. if (req_comp == img_n)
  1656. return data;
  1657. STBI_ASSERT(req_comp >= 1 && req_comp <= 4);
  1658. good = (stbi__uint16*)stbi__malloc(req_comp * x * y * 2);
  1659. if (good == NULL) {
  1660. STBI_FREE(data);
  1661. return (stbi__uint16*)stbi__errpuc("outofmem", "Out of memory");
  1662. }
  1663. for (j = 0; j < (int)y; ++j) {
  1664. stbi__uint16* src = data + j * x * img_n;
  1665. stbi__uint16* dest = good + j * x * req_comp;
  1666. #define STBI__COMBO(a, b) ((a)*8 + (b))
  1667. #define STBI__CASE(a, b) \
  1668. case STBI__COMBO(a, b): \
  1669. for (i = x - 1; i >= 0; --i, src += a, dest += b)
  1670. // convert source image with img_n components to one with req_comp components;
  1671. // avoid switch per pixel, so use switch per scanline and massive macros
  1672. switch (STBI__COMBO(img_n, req_comp)) {
  1673. STBI__CASE(1, 2) {
  1674. dest[0] = src[0];
  1675. dest[1] = 0xffff;
  1676. }
  1677. break;
  1678. STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; }
  1679. break;
  1680. STBI__CASE(1, 4) {
  1681. dest[0] = dest[1] = dest[2] = src[0];
  1682. dest[3] = 0xffff;
  1683. }
  1684. break;
  1685. STBI__CASE(2, 1) { dest[0] = src[0]; }
  1686. break;
  1687. STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; }
  1688. break;
  1689. STBI__CASE(2, 4) {
  1690. dest[0] = dest[1] = dest[2] = src[0];
  1691. dest[3] = src[1];
  1692. }
  1693. break;
  1694. STBI__CASE(3, 4) {
  1695. dest[0] = src[0];
  1696. dest[1] = src[1];
  1697. dest[2] = src[2];
  1698. dest[3] = 0xffff;
  1699. }
  1700. break;
  1701. STBI__CASE(3, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); }
  1702. break;
  1703. STBI__CASE(3, 2) {
  1704. dest[0] = stbi__compute_y_16(src[0], src[1], src[2]);
  1705. dest[1] = 0xffff;
  1706. }
  1707. break;
  1708. STBI__CASE(4, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); }
  1709. break;
  1710. STBI__CASE(4, 2) {
  1711. dest[0] = stbi__compute_y_16(src[0], src[1], src[2]);
  1712. dest[1] = src[3];
  1713. }
  1714. break;
  1715. STBI__CASE(4, 3) {
  1716. dest[0] = src[0];
  1717. dest[1] = src[1];
  1718. dest[2] = src[2];
  1719. }
  1720. break;
  1721. default:
  1722. STBI_ASSERT(0);
  1723. STBI_FREE(data);
  1724. STBI_FREE(good);
  1725. return (stbi__uint16*)stbi__errpuc(
  1726. "unsupported", "Unsupported format conversion");
  1727. }
  1728. #undef STBI__CASE
  1729. }
  1730. STBI_FREE(data);
  1731. return good;
  1732. }
  1733. #endif
  1734. #ifndef STBI_NO_LINEAR
  1735. static float* stbi__ldr_to_hdr(stbi_uc* data, int x, int y, int comp) {
  1736. int i, k, n;
  1737. float* output;
  1738. if (!data)
  1739. return NULL;
  1740. output = (float*)stbi__malloc_mad4(x, y, comp, sizeof(float), 0);
  1741. if (output == NULL) {
  1742. STBI_FREE(data);
  1743. return stbi__errpf("outofmem", "Out of memory");
  1744. }
  1745. // compute number of non-alpha components
  1746. if (comp & 1)
  1747. n = comp;
  1748. else
  1749. n = comp - 1;
  1750. for (i = 0; i < x * y; ++i) {
  1751. for (k = 0; k < n; ++k) {
  1752. output[i * comp + k] =
  1753. (float)(pow(data[i * comp + k] / 255.0f, stbi__l2h_gamma) *
  1754. stbi__l2h_scale);
  1755. }
  1756. }
  1757. if (n < comp) {
  1758. for (i = 0; i < x * y; ++i) {
  1759. output[i * comp + n] = data[i * comp + n] / 255.0f;
  1760. }
  1761. }
  1762. STBI_FREE(data);
  1763. return output;
  1764. }
  1765. #endif
  1766. #ifndef STBI_NO_HDR
  1767. #define stbi__float2int(x) ((int)(x))
  1768. static stbi_uc* stbi__hdr_to_ldr(float* data, int x, int y, int comp) {
  1769. int i, k, n;
  1770. stbi_uc* output;
  1771. if (!data)
  1772. return NULL;
  1773. output = (stbi_uc*)stbi__malloc_mad3(x, y, comp, 0);
  1774. if (output == NULL) {
  1775. STBI_FREE(data);
  1776. return stbi__errpuc("outofmem", "Out of memory");
  1777. }
  1778. // compute number of non-alpha components
  1779. if (comp & 1)
  1780. n = comp;
  1781. else
  1782. n = comp - 1;
  1783. for (i = 0; i < x * y; ++i) {
  1784. for (k = 0; k < n; ++k) {
  1785. float z =
  1786. (float)pow(
  1787. data[i * comp + k] * stbi__h2l_scale_i, stbi__h2l_gamma_i) *
  1788. 255 +
  1789. 0.5f;
  1790. if (z < 0)
  1791. z = 0;
  1792. if (z > 255)
  1793. z = 255;
  1794. output[i * comp + k] = (stbi_uc)stbi__float2int(z);
  1795. }
  1796. if (k < comp) {
  1797. float z = data[i * comp + k] * 255 + 0.5f;
  1798. if (z < 0)
  1799. z = 0;
  1800. if (z > 255)
  1801. z = 255;
  1802. output[i * comp + k] = (stbi_uc)stbi__float2int(z);
  1803. }
  1804. }
  1805. STBI_FREE(data);
  1806. return output;
  1807. }
  1808. #endif
  1809. //////////////////////////////////////////////////////////////////////////////
  1810. //
  1811. // "baseline" JPEG/JFIF decoder
  1812. //
  1813. // simple implementation
  1814. // - doesn't support delayed output of y-dimension
  1815. // - simple interface (only one output format: 8-bit interleaved RGB)
  1816. // - doesn't try to recover corrupt jpegs
  1817. // - doesn't allow partial loading, loading multiple at once
  1818. // - still fast on x86 (copying globals into locals doesn't help x86)
  1819. // - allocates lots of intermediate memory (full size of all components)
  1820. // - non-interleaved case requires this anyway
  1821. // - allows good upsampling (see next)
  1822. // high-quality
  1823. // - upsampled channels are bilinearly interpolated, even across blocks
  1824. // - quality integer IDCT derived from IJG's 'slow'
  1825. // performance
  1826. // - fast huffman; reasonable integer IDCT
  1827. // - some SIMD kernels for common paths on targets with SSE2/NEON
  1828. // - uses a lot of intermediate memory, could cache poorly
  1829. #ifndef STBI_NO_JPEG
  1830. // huffman decoding acceleration
  1831. #define FAST_BITS 9 // larger handles more cases; smaller stomps less cache
  1832. typedef struct {
  1833. stbi_uc fast[1 << FAST_BITS];
  1834. // weirdly, repacking this into AoS is a 10% speed loss, instead of a win
  1835. stbi__uint16 code[256];
  1836. stbi_uc values[256];
  1837. stbi_uc size[257];
  1838. unsigned int maxcode[18];
  1839. int delta[17]; // old 'firstsymbol' - old 'firstcode'
  1840. } stbi__huffman;
  1841. typedef struct {
  1842. stbi__context* s;
  1843. stbi__huffman huff_dc[4];
  1844. stbi__huffman huff_ac[4];
  1845. stbi__uint16 dequant[4][64];
  1846. stbi__int16 fast_ac[4][1 << FAST_BITS];
  1847. // sizes for components, interleaved MCUs
  1848. int img_h_max, img_v_max;
  1849. int img_mcu_x, img_mcu_y;
  1850. int img_mcu_w, img_mcu_h;
  1851. // definition of jpeg image component
  1852. struct {
  1853. int id;
  1854. int h, v;
  1855. int tq;
  1856. int hd, ha;
  1857. int dc_pred;
  1858. int x, y, w2, h2;
  1859. stbi_uc* data;
  1860. void *raw_data, *raw_coeff;
  1861. stbi_uc* linebuf;
  1862. short* coeff; // progressive only
  1863. int coeff_w, coeff_h; // number of 8x8 coefficient blocks
  1864. } img_comp[4];
  1865. stbi__uint32 code_buffer; // jpeg entropy-coded buffer
  1866. int code_bits; // number of valid bits
  1867. unsigned char marker; // marker seen while filling entropy buffer
  1868. int nomore; // flag if we saw a marker so must stop
  1869. int progressive;
  1870. int spec_start;
  1871. int spec_end;
  1872. int succ_high;
  1873. int succ_low;
  1874. int eob_run;
  1875. int jfif;
  1876. int app14_color_transform; // Adobe APP14 tag
  1877. int rgb;
  1878. int scan_n, order[4];
  1879. int restart_interval, todo;
  1880. // kernels
  1881. void (*idct_block_kernel)(stbi_uc* out, int out_stride, short data[64]);
  1882. void (*YCbCr_to_RGB_kernel)(
  1883. stbi_uc* out, const stbi_uc* y, const stbi_uc* pcb, const stbi_uc* pcr,
  1884. int count, int step);
  1885. stbi_uc* (*resample_row_hv_2_kernel)(
  1886. stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs);
  1887. } stbi__jpeg;
  1888. static int stbi__build_huffman(stbi__huffman* h, int* count) {
  1889. int i, j, k = 0;
  1890. unsigned int code;
  1891. // build size list for each symbol (from JPEG spec)
  1892. for (i = 0; i < 16; ++i)
  1893. for (j = 0; j < count[i]; ++j)
  1894. h->size[k++] = (stbi_uc)(i + 1);
  1895. h->size[k] = 0;
  1896. // compute actual symbols (from jpeg spec)
  1897. code = 0;
  1898. k = 0;
  1899. for (j = 1; j <= 16; ++j) {
  1900. // compute delta to add to code to compute symbol id
  1901. h->delta[j] = k - code;
  1902. if (h->size[k] == j) {
  1903. while (h->size[k] == j)
  1904. h->code[k++] = (stbi__uint16)(code++);
  1905. if (code - 1 >= (1u << j))
  1906. return stbi__err("bad code lengths", "Corrupt JPEG");
  1907. }
  1908. // compute largest code + 1 for this size, preshifted as needed later
  1909. h->maxcode[j] = code << (16 - j);
  1910. code <<= 1;
  1911. }
  1912. h->maxcode[j] = 0xffffffff;
  1913. // build non-spec acceleration table; 255 is flag for not-accelerated
  1914. memset(h->fast, 255, 1 << FAST_BITS);
  1915. for (i = 0; i < k; ++i) {
  1916. int s = h->size[i];
  1917. if (s <= FAST_BITS) {
  1918. int c = h->code[i] << (FAST_BITS - s);
  1919. int m = 1 << (FAST_BITS - s);
  1920. for (j = 0; j < m; ++j) {
  1921. h->fast[c + j] = (stbi_uc)i;
  1922. }
  1923. }
  1924. }
  1925. return 1;
  1926. }
  1927. // build a table that decodes both magnitude and value of small ACs in
  1928. // one go.
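// each nonzero fast_ac entry packs three fields, unpacked by the decoder with
// one shift/mask pass: bits 0..3 = total bits to consume (code length plus
// magnitude bits), bits 4..7 = run of zeros preceding the coefficient,
// bits 8..15 = the signed coefficient value itself.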
  1929. static void stbi__build_fast_ac(stbi__int16* fast_ac, stbi__huffman* h) {
  1930. int i;
  1931. for (i = 0; i < (1 << FAST_BITS); ++i) {
  1932. stbi_uc fast = h->fast[i];
  1933. fast_ac[i] = 0;
  1934. if (fast < 255) {
  1935. int rs = h->values[fast];
  1936. int run = (rs >> 4) & 15;
  1937. int magbits = rs & 15;
  1938. int len = h->size[fast];
  1939. if (magbits && len + magbits <= FAST_BITS) {
  1940. // magnitude code followed by receive_extend code
  1941. int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
  1942. int m = 1 << (magbits - 1);
  1943. if (k < m)
  1944. k += (~0U << magbits) + 1;
  1945. // if the result is small enough, we can fit it in fast_ac table
  1946. if (k >= -128 && k <= 127)
  1947. fast_ac[i] =
  1948. (stbi__int16)((k * 256) + (run * 16) + (len + magbits));
  1949. }
  1950. }
  1951. }
  1952. }
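// For illustration: the packed layout mirrors how stbi__jpeg_decode_block
// unpacks it, i.e.
//    value = entry >> 8;   run = (entry >> 4) & 15;   bits = entry & 15;
// For example, a 3-bit AC code whose rs byte is 0x21 (run 2, 1 magnitude bit)
// followed by a magnitude bit of 1 packs as (1*256)+(2*16)+(3+1) = 292, which
// unpacks back to value 1, run 2, and 4 bits consumed in total.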
  1953. static void stbi__grow_buffer_unsafe(stbi__jpeg* j) {
  1954. do {
  1955. unsigned int b = j->nomore ? 0 : stbi__get8(j->s);
  1956. if (b == 0xff) {
  1957. int c = stbi__get8(j->s);
  1958. while (c == 0xff)
  1959. c = stbi__get8(j->s); // consume fill bytes
  1960. if (c != 0) {
  1961. j->marker = (unsigned char)c;
  1962. j->nomore = 1;
  1963. return;
  1964. }
  1965. }
  1966. j->code_buffer |= b << (24 - j->code_bits);
  1967. j->code_bits += 8;
  1968. } while (j->code_bits <= 24);
  1969. }
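// Background (illustrative): inside entropy-coded data a literal 0xff byte is
// byte-stuffed as 0xff 0x00, which is why 0xff followed by 0x00 falls through
// as data above, while 0xff followed by any nonzero byte is a real marker that
// gets cached in j->marker; once nomore is set, zero bits are fed into the
// buffer so decoding can limp to the end of the scan.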
  1970. // (1 << n) - 1
  1971. static const stbi__uint32 stbi__bmask[17] = {0, 1, 3, 7, 15, 31,
  1972. 63, 127, 255, 511, 1023, 2047,
  1973. 4095, 8191, 16383, 32767, 65535};
  1974. // decode a jpeg huffman value from the bitstream
  1975. stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg* j, stbi__huffman* h) {
  1976. unsigned int temp;
  1977. int c, k;
  1978. if (j->code_bits < 16)
  1979. stbi__grow_buffer_unsafe(j);
  1980. // look at the top FAST_BITS and determine what symbol ID it is,
  1981. // if the code is <= FAST_BITS
  1982. c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
  1983. k = h->fast[c];
  1984. if (k < 255) {
  1985. int s = h->size[k];
  1986. if (s > j->code_bits)
  1987. return -1;
  1988. j->code_buffer <<= s;
  1989. j->code_bits -= s;
  1990. return h->values[k];
  1991. }
  1992. // naive test is to shift the code_buffer down so k bits are
  1993. // valid, then test against maxcode. To speed this up, we've
  1994. // preshifted maxcode left so that it has (16-k) 0s at the
  1995. // end; in other words, regardless of the number of bits, it
1996. // wants to be compared against a value left-aligned to 16 bits;
  1997. // that way we don't need to shift inside the loop.
  1998. temp = j->code_buffer >> 16;
  1999. for (k = FAST_BITS + 1;; ++k)
  2000. if (temp < h->maxcode[k])
  2001. break;
  2002. if (k == 17) {
  2003. // error! code not found
  2004. j->code_bits -= 16;
  2005. return -1;
  2006. }
  2007. if (k > j->code_bits)
  2008. return -1;
  2009. // convert the huffman code to the symbol id
  2010. c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
  2011. STBI_ASSERT(
  2012. (((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) ==
  2013. h->code[c]);
  2014. // convert the id to a symbol
  2015. j->code_bits -= k;
  2016. j->code_buffer <<= k;
  2017. return h->values[c];
  2018. }
  2019. // bias[n] = (-1<<n) + 1
  2020. static const int stbi__jbias[16] = {0, -1, -3, -7, -15, -31,
  2021. -63, -127, -255, -511, -1023, -2047,
  2022. -4095, -8191, -16383, -32767};
  2023. // combined JPEG 'receive' and JPEG 'extend', since baseline
  2024. // always extends everything it receives.
  2025. stbi_inline static int stbi__extend_receive(stbi__jpeg* j, int n) {
  2026. unsigned int k;
  2027. int sgn;
  2028. if (j->code_bits < n)
  2029. stbi__grow_buffer_unsafe(j);
  2030. sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1
  2031. // if MSB set (negative)
  2032. k = stbi_lrot(j->code_buffer, n);
  2033. j->code_buffer = k & ~stbi__bmask[n];
  2034. k &= stbi__bmask[n];
  2035. j->code_bits -= n;
  2036. return k + (stbi__jbias[n] & (sgn - 1));
  2037. }
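// For illustration: this is JPEG's EXTEND procedure. For n = 3,
// stbi__jbias[3] = -7: if the next three bits are 101b the MSB is set, sgn is
// 1 and the result is simply 5; if they are 010b the MSB is clear and the
// result is 2 + (-7) = -5, so 3-bit fields cover -7..-4 and 4..7. A scalar
// sketch of the same mapping on an already-extracted n-bit value (illustration
// only, not part of the decoder):
#if 0
static int jpeg_extend(int received, int n)
{
   // assumes 0 < n < 16 and 0 <= received < (1 << n)
   return received < (1 << (n - 1)) ? received - ((1 << n) - 1) : received;
}
#endif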
  2038. // get some unsigned bits
  2039. stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg* j, int n) {
  2040. unsigned int k;
  2041. if (j->code_bits < n)
  2042. stbi__grow_buffer_unsafe(j);
  2043. k = stbi_lrot(j->code_buffer, n);
  2044. j->code_buffer = k & ~stbi__bmask[n];
  2045. k &= stbi__bmask[n];
  2046. j->code_bits -= n;
  2047. return k;
  2048. }
  2049. stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg* j) {
  2050. unsigned int k;
  2051. if (j->code_bits < 1)
  2052. stbi__grow_buffer_unsafe(j);
  2053. k = j->code_buffer;
  2054. j->code_buffer <<= 1;
  2055. --j->code_bits;
  2056. return k & 0x80000000;
  2057. }
  2058. // given a value that's at position X in the zigzag stream,
  2059. // where does it appear in the 8x8 matrix coded as row-major?
  2060. static const stbi_uc stbi__jpeg_dezigzag[64 + 15] = {
  2061. 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48,
  2062. 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22,
  2063. 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55,
  2064. 62, 63,
  2065. // let corrupt input sample past end
  2066. 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63};
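// For example, zigzag index 2 maps to natural position 8, i.e. row 1, column 0
// of the 8x8 block (row = pos >> 3, col = pos & 7). The 15 trailing 63s make an
// overlong run from corrupt data keep rewriting the last coefficient instead of
// indexing past the table.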
  2067. // decode one 64-entry block--
  2068. static int stbi__jpeg_decode_block(
  2069. stbi__jpeg* j, short data[64], stbi__huffman* hdc, stbi__huffman* hac,
  2070. stbi__int16* fac, int b, stbi__uint16* dequant) {
  2071. int diff, dc, k;
  2072. int t;
  2073. if (j->code_bits < 16)
  2074. stbi__grow_buffer_unsafe(j);
  2075. t = stbi__jpeg_huff_decode(j, hdc);
  2076. if (t < 0 || t > 15)
  2077. return stbi__err("bad huffman code", "Corrupt JPEG");
  2078. // 0 all the ac values now so we can do it 32-bits at a time
  2079. memset(data, 0, 64 * sizeof(data[0]));
  2080. diff = t ? stbi__extend_receive(j, t) : 0;
  2081. dc = j->img_comp[b].dc_pred + diff;
  2082. j->img_comp[b].dc_pred = dc;
  2083. data[0] = (short)(dc * dequant[0]);
  2084. // decode AC components, see JPEG spec
  2085. k = 1;
  2086. do {
  2087. unsigned int zig;
  2088. int c, r, s;
  2089. if (j->code_bits < 16)
  2090. stbi__grow_buffer_unsafe(j);
  2091. c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
  2092. r = fac[c];
  2093. if (r) { // fast-AC path
  2094. k += (r >> 4) & 15; // run
  2095. s = r & 15; // combined length
  2096. j->code_buffer <<= s;
  2097. j->code_bits -= s;
  2098. // decode into unzigzag'd location
  2099. zig = stbi__jpeg_dezigzag[k++];
  2100. data[zig] = (short)((r >> 8) * dequant[zig]);
  2101. } else {
  2102. int rs = stbi__jpeg_huff_decode(j, hac);
  2103. if (rs < 0)
  2104. return stbi__err("bad huffman code", "Corrupt JPEG");
  2105. s = rs & 15;
  2106. r = rs >> 4;
  2107. if (s == 0) {
  2108. if (rs != 0xf0)
  2109. break; // end block
  2110. k += 16;
  2111. } else {
  2112. k += r;
  2113. // decode into unzigzag'd location
  2114. zig = stbi__jpeg_dezigzag[k++];
  2115. data[zig] = (short)(stbi__extend_receive(j, s) * dequant[zig]);
  2116. }
  2117. }
  2118. } while (k < 64);
  2119. return 1;
  2120. }
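// For reference (illustrative): each AC Huffman symbol rs packs a zero run in
// its high nibble and a magnitude size in its low nibble; rs == 0x00 is EOB
// (end of block) and rs == 0xF0 is ZRL (a run of 16 zeros), which is why the
// s == 0 branch above either terminates the block or advances k by 16.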
  2121. static int stbi__jpeg_decode_block_prog_dc(
  2122. stbi__jpeg* j, short data[64], stbi__huffman* hdc, int b) {
  2123. int diff, dc;
  2124. int t;
  2125. if (j->spec_end != 0)
  2126. return stbi__err("can't merge dc and ac", "Corrupt JPEG");
  2127. if (j->code_bits < 16)
  2128. stbi__grow_buffer_unsafe(j);
  2129. if (j->succ_high == 0) {
  2130. // first scan for DC coefficient, must be first
  2131. memset(data, 0, 64 * sizeof(data[0])); // 0 all the ac values now
  2132. t = stbi__jpeg_huff_decode(j, hdc);
  2133. if (t < 0 || t > 15)
  2134. return stbi__err("can't merge dc and ac", "Corrupt JPEG");
  2135. diff = t ? stbi__extend_receive(j, t) : 0;
  2136. dc = j->img_comp[b].dc_pred + diff;
  2137. j->img_comp[b].dc_pred = dc;
  2138. data[0] = (short)(dc * (1 << j->succ_low));
  2139. } else {
  2140. // refinement scan for DC coefficient
  2141. if (stbi__jpeg_get_bit(j))
  2142. data[0] += (short)(1 << j->succ_low);
  2143. }
  2144. return 1;
  2145. }
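// For illustration: with successive approximation the first DC scan stores the
// predictor-corrected value pre-shifted, e.g. dc << 2 when succ_low is 2; each
// refinement scan then lowers succ_low by one and sends a single bit per block
// which, when set, adds 1 << succ_low back into data[0].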
  2146. // @OPTIMIZE: store non-zigzagged during the decode passes,
  2147. // and only de-zigzag when dequantizing
  2148. static int stbi__jpeg_decode_block_prog_ac(
  2149. stbi__jpeg* j, short data[64], stbi__huffman* hac, stbi__int16* fac) {
  2150. int k;
  2151. if (j->spec_start == 0)
  2152. return stbi__err("can't merge dc and ac", "Corrupt JPEG");
  2153. if (j->succ_high == 0) {
  2154. int shift = j->succ_low;
  2155. if (j->eob_run) {
  2156. --j->eob_run;
  2157. return 1;
  2158. }
  2159. k = j->spec_start;
  2160. do {
  2161. unsigned int zig;
  2162. int c, r, s;
  2163. if (j->code_bits < 16)
  2164. stbi__grow_buffer_unsafe(j);
  2165. c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
  2166. r = fac[c];
  2167. if (r) { // fast-AC path
  2168. k += (r >> 4) & 15; // run
  2169. s = r & 15; // combined length
  2170. j->code_buffer <<= s;
  2171. j->code_bits -= s;
  2172. zig = stbi__jpeg_dezigzag[k++];
  2173. data[zig] = (short)((r >> 8) * (1 << shift));
  2174. } else {
  2175. int rs = stbi__jpeg_huff_decode(j, hac);
  2176. if (rs < 0)
  2177. return stbi__err("bad huffman code", "Corrupt JPEG");
  2178. s = rs & 15;
  2179. r = rs >> 4;
  2180. if (s == 0) {
  2181. if (r < 15) {
  2182. j->eob_run = (1 << r);
  2183. if (r)
  2184. j->eob_run += stbi__jpeg_get_bits(j, r);
  2185. --j->eob_run;
  2186. break;
  2187. }
  2188. k += 16;
  2189. } else {
  2190. k += r;
  2191. zig = stbi__jpeg_dezigzag[k++];
  2192. data[zig] = (short)(stbi__extend_receive(j, s) * (1 << shift));
  2193. }
  2194. }
  2195. } while (k <= j->spec_end);
  2196. } else {
  2197. // refinement scan for these AC coefficients
  2198. short bit = (short)(1 << j->succ_low);
  2199. if (j->eob_run) {
  2200. --j->eob_run;
  2201. for (k = j->spec_start; k <= j->spec_end; ++k) {
  2202. short* p = &data[stbi__jpeg_dezigzag[k]];
  2203. if (*p != 0)
  2204. if (stbi__jpeg_get_bit(j))
  2205. if ((*p & bit) == 0) {
  2206. if (*p > 0)
  2207. *p += bit;
  2208. else
  2209. *p -= bit;
  2210. }
  2211. }
  2212. } else {
  2213. k = j->spec_start;
  2214. do {
  2215. int r, s;
  2216. int rs = stbi__jpeg_huff_decode(
  2217. j, hac); // @OPTIMIZE see if we can use the fast path here,
  2218. // advance-by-r is so slow, eh
  2219. if (rs < 0)
  2220. return stbi__err("bad huffman code", "Corrupt JPEG");
  2221. s = rs & 15;
  2222. r = rs >> 4;
  2223. if (s == 0) {
  2224. if (r < 15) {
  2225. j->eob_run = (1 << r) - 1;
  2226. if (r)
  2227. j->eob_run += stbi__jpeg_get_bits(j, r);
  2228. r = 64; // force end of block
  2229. } else {
  2230. // r=15 s=0 should write 16 0s, so we just do
  2231. // a run of 15 0s and then write s (which is 0),
  2232. // so we don't have to do anything special here
  2233. }
  2234. } else {
  2235. if (s != 1)
  2236. return stbi__err("bad huffman code", "Corrupt JPEG");
  2237. // sign bit
  2238. if (stbi__jpeg_get_bit(j))
  2239. s = bit;
  2240. else
  2241. s = -bit;
  2242. }
  2243. // advance by r
  2244. while (k <= j->spec_end) {
  2245. short* p = &data[stbi__jpeg_dezigzag[k++]];
  2246. if (*p != 0) {
  2247. if (stbi__jpeg_get_bit(j))
  2248. if ((*p & bit) == 0) {
  2249. if (*p > 0)
  2250. *p += bit;
  2251. else
  2252. *p -= bit;
  2253. }
  2254. } else {
  2255. if (r == 0) {
  2256. *p = (short)s;
  2257. break;
  2258. }
  2259. --r;
  2260. }
  2261. }
  2262. } while (k <= j->spec_end);
  2263. }
  2264. }
  2265. return 1;
  2266. }
  2267. // take a -128..127 value and stbi__clamp it and convert to 0..255
  2268. stbi_inline static stbi_uc stbi__clamp(int x) {
  2269. // trick to use a single test to catch both cases
  2270. if ((unsigned int)x > 255) {
  2271. if (x < 0)
  2272. return 0;
  2273. if (x > 255)
  2274. return 255;
  2275. }
  2276. return (stbi_uc)x;
  2277. }
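// For illustration: the cast makes negative inputs wrap to huge unsigned
// values, so the single comparison catches both out-of-range directions; e.g.
// x = -3 becomes 0xfffffffd, which is > 255, and the branch then clamps it to 0.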
  2278. #define stbi__f2f(x) ((int)(((x)*4096 + 0.5)))
  2279. #define stbi__fsh(x) ((x)*4096)
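// For illustration: these give a 12-bit fixed-point scale. stbi__f2f(0.5411961f)
// rounds 0.5411961 * 4096 to 2217, and stbi__fsh(x) promotes an integer to the
// same 1<<12 scale; the shifts at the end of each IDCT pass below remove that
// scale again.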
  2280. // derived from jidctint -- DCT_ISLOW
  2281. #define STBI__IDCT_1D(s0, s1, s2, s3, s4, s5, s6, s7) \
  2282. int t0, t1, t2, t3, p1, p2, p3, p4, p5, x0, x1, x2, x3; \
  2283. p2 = s2; \
  2284. p3 = s6; \
  2285. p1 = (p2 + p3) * stbi__f2f(0.5411961f); \
  2286. t2 = p1 + p3 * stbi__f2f(-1.847759065f); \
  2287. t3 = p1 + p2 * stbi__f2f(0.765366865f); \
  2288. p2 = s0; \
  2289. p3 = s4; \
  2290. t0 = stbi__fsh(p2 + p3); \
  2291. t1 = stbi__fsh(p2 - p3); \
  2292. x0 = t0 + t3; \
  2293. x3 = t0 - t3; \
  2294. x1 = t1 + t2; \
  2295. x2 = t1 - t2; \
  2296. t0 = s7; \
  2297. t1 = s5; \
  2298. t2 = s3; \
  2299. t3 = s1; \
  2300. p3 = t0 + t2; \
  2301. p4 = t1 + t3; \
  2302. p1 = t0 + t3; \
  2303. p2 = t1 + t2; \
  2304. p5 = (p3 + p4) * stbi__f2f(1.175875602f); \
  2305. t0 = t0 * stbi__f2f(0.298631336f); \
  2306. t1 = t1 * stbi__f2f(2.053119869f); \
  2307. t2 = t2 * stbi__f2f(3.072711026f); \
  2308. t3 = t3 * stbi__f2f(1.501321110f); \
  2309. p1 = p5 + p1 * stbi__f2f(-0.899976223f); \
  2310. p2 = p5 + p2 * stbi__f2f(-2.562915447f); \
  2311. p3 = p3 * stbi__f2f(-1.961570560f); \
  2312. p4 = p4 * stbi__f2f(-0.390180644f); \
  2313. t3 += p1 + p4; \
  2314. t2 += p2 + p3; \
  2315. t1 += p2 + p4; \
  2316. t0 += p1 + p3;
  2317. static void stbi__idct_block(stbi_uc* out, int out_stride, short data[64]) {
  2318. int i, val[64], *v = val;
  2319. stbi_uc* o;
  2320. short* d = data;
  2321. // columns
  2322. for (i = 0; i < 8; ++i, ++d, ++v) {
  2323. // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing
  2324. if (d[8] == 0 && d[16] == 0 && d[24] == 0 && d[32] == 0 && d[40] == 0 &&
  2325. d[48] == 0 && d[56] == 0) {
2326. // no shortcut:               0 seconds
2327. // (1|2|3|4|5|6|7)==0:        0 seconds
2328. // all separate:              -0.047 seconds
2329. // 1 && 2|3 && 4|5 && 6|7:    -0.047 seconds
  2330. int dcterm = d[0] * 4;
  2331. v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
  2332. } else {
  2333. STBI__IDCT_1D(d[0], d[8], d[16], d[24], d[32], d[40], d[48], d[56])
  2334. // constants scaled things up by 1<<12; let's bring them back
  2335. // down, but keep 2 extra bits of precision
  2336. x0 += 512;
  2337. x1 += 512;
  2338. x2 += 512;
  2339. x3 += 512;
  2340. v[0] = (x0 + t3) >> 10;
  2341. v[56] = (x0 - t3) >> 10;
  2342. v[8] = (x1 + t2) >> 10;
  2343. v[48] = (x1 - t2) >> 10;
  2344. v[16] = (x2 + t1) >> 10;
  2345. v[40] = (x2 - t1) >> 10;
  2346. v[24] = (x3 + t0) >> 10;
  2347. v[32] = (x3 - t0) >> 10;
  2348. }
  2349. }
  2350. for (i = 0, v = val, o = out; i < 8; ++i, v += 8, o += out_stride) {
  2351. // no fast case since the first 1D IDCT spread components out
  2352. STBI__IDCT_1D(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7])
  2353. // constants scaled things up by 1<<12, plus we had 1<<2 from first
  2354. // loop, plus horizontal and vertical each scale by sqrt(8) so together
  2355. // we've got an extra 1<<3, so 1<<17 total we need to remove.
  2356. // so we want to round that, which means adding 0.5 * 1<<17,
  2357. // aka 65536. Also, we'll end up with -128 to 127 that we want
  2358. // to encode as 0..255 by adding 128, so we'll add that before the shift
  2359. x0 += 65536 + (128 << 17);
  2360. x1 += 65536 + (128 << 17);
  2361. x2 += 65536 + (128 << 17);
  2362. x3 += 65536 + (128 << 17);
  2363. // tried computing the shifts into temps, or'ing the temps to see
  2364. // if any were out of range, but that was slower
  2365. o[0] = stbi__clamp((x0 + t3) >> 17);
  2366. o[7] = stbi__clamp((x0 - t3) >> 17);
  2367. o[1] = stbi__clamp((x1 + t2) >> 17);
  2368. o[6] = stbi__clamp((x1 - t2) >> 17);
  2369. o[2] = stbi__clamp((x2 + t1) >> 17);
  2370. o[5] = stbi__clamp((x2 - t1) >> 17);
  2371. o[3] = stbi__clamp((x3 + t0) >> 17);
  2372. o[4] = stbi__clamp((x3 - t0) >> 17);
  2373. }
  2374. }
  2375. #ifdef STBI_SSE2
  2376. // sse2 integer IDCT. not the fastest possible implementation but it
  2377. // produces bit-identical results to the generic C version so it's
  2378. // fully "transparent".
  2379. static void stbi__idct_simd(stbi_uc* out, int out_stride, short data[64]) {
  2380. // This is constructed to match our regular (generic) integer IDCT exactly.
  2381. __m128i row0, row1, row2, row3, row4, row5, row6, row7;
  2382. __m128i tmp;
  2383. // dot product constant: even elems=x, odd elems=y
  2384. #define dct_const(x, y) _mm_setr_epi16((x), (y), (x), (y), (x), (y), (x), (y))
  2385. // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit)
  2386. // out(1) = c1[even]*x + c1[odd]*y
  2387. #define dct_rot(out0, out1, x, y, c0, c1) \
  2388. __m128i c0##lo = _mm_unpacklo_epi16((x), (y)); \
  2389. __m128i c0##hi = _mm_unpackhi_epi16((x), (y)); \
  2390. __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
  2391. __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
  2392. __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
  2393. __m128i out1##_h = _mm_madd_epi16(c0##hi, c1)
  2394. // out = in << 12 (in 16-bit, out 32-bit)
  2395. #define dct_widen(out, in) \
  2396. __m128i out##_l = \
  2397. _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
  2398. __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)
  2399. // wide add
  2400. #define dct_wadd(out, a, b) \
  2401. __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
  2402. __m128i out##_h = _mm_add_epi32(a##_h, b##_h)
  2403. // wide sub
  2404. #define dct_wsub(out, a, b) \
  2405. __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
  2406. __m128i out##_h = _mm_sub_epi32(a##_h, b##_h)
  2407. // butterfly a/b, add bias, then shift by "s" and pack
  2408. #define dct_bfly32o(out0, out1, a, b, bias, s) \
  2409. { \
  2410. __m128i abiased_l = _mm_add_epi32(a##_l, bias); \
  2411. __m128i abiased_h = _mm_add_epi32(a##_h, bias); \
  2412. dct_wadd(sum, abiased, b); \
  2413. dct_wsub(dif, abiased, b); \
  2414. out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
  2415. out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
  2416. }
  2417. // 8-bit interleave step (for transposes)
  2418. #define dct_interleave8(a, b) \
  2419. tmp = a; \
  2420. a = _mm_unpacklo_epi8(a, b); \
  2421. b = _mm_unpackhi_epi8(tmp, b)
  2422. // 16-bit interleave step (for transposes)
  2423. #define dct_interleave16(a, b) \
  2424. tmp = a; \
  2425. a = _mm_unpacklo_epi16(a, b); \
  2426. b = _mm_unpackhi_epi16(tmp, b)
  2427. #define dct_pass(bias, shift) \
  2428. { \
  2429. /* even part */ \
  2430. dct_rot(t2e, t3e, row2, row6, rot0_0, rot0_1); \
  2431. __m128i sum04 = _mm_add_epi16(row0, row4); \
  2432. __m128i dif04 = _mm_sub_epi16(row0, row4); \
  2433. dct_widen(t0e, sum04); \
  2434. dct_widen(t1e, dif04); \
  2435. dct_wadd(x0, t0e, t3e); \
  2436. dct_wsub(x3, t0e, t3e); \
  2437. dct_wadd(x1, t1e, t2e); \
  2438. dct_wsub(x2, t1e, t2e); \
  2439. /* odd part */ \
  2440. dct_rot(y0o, y2o, row7, row3, rot2_0, rot2_1); \
  2441. dct_rot(y1o, y3o, row5, row1, rot3_0, rot3_1); \
  2442. __m128i sum17 = _mm_add_epi16(row1, row7); \
  2443. __m128i sum35 = _mm_add_epi16(row3, row5); \
  2444. dct_rot(y4o, y5o, sum17, sum35, rot1_0, rot1_1); \
  2445. dct_wadd(x4, y0o, y4o); \
  2446. dct_wadd(x5, y1o, y5o); \
  2447. dct_wadd(x6, y2o, y5o); \
  2448. dct_wadd(x7, y3o, y4o); \
  2449. dct_bfly32o(row0, row7, x0, x7, bias, shift); \
  2450. dct_bfly32o(row1, row6, x1, x6, bias, shift); \
  2451. dct_bfly32o(row2, row5, x2, x5, bias, shift); \
  2452. dct_bfly32o(row3, row4, x3, x4, bias, shift); \
  2453. }
  2454. __m128i rot0_0 = dct_const(
  2455. stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f));
  2456. __m128i rot0_1 = dct_const(
  2457. stbi__f2f(0.5411961f) + stbi__f2f(0.765366865f), stbi__f2f(0.5411961f));
  2458. __m128i rot1_0 = dct_const(
  2459. stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f),
  2460. stbi__f2f(1.175875602f));
  2461. __m128i rot1_1 = dct_const(
  2462. stbi__f2f(1.175875602f),
  2463. stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f));
  2464. __m128i rot2_0 = dct_const(
  2465. stbi__f2f(-1.961570560f) + stbi__f2f(0.298631336f),
  2466. stbi__f2f(-1.961570560f));
  2467. __m128i rot2_1 = dct_const(
  2468. stbi__f2f(-1.961570560f),
  2469. stbi__f2f(-1.961570560f) + stbi__f2f(3.072711026f));
  2470. __m128i rot3_0 = dct_const(
  2471. stbi__f2f(-0.390180644f) + stbi__f2f(2.053119869f),
  2472. stbi__f2f(-0.390180644f));
  2473. __m128i rot3_1 = dct_const(
  2474. stbi__f2f(-0.390180644f),
  2475. stbi__f2f(-0.390180644f) + stbi__f2f(1.501321110f));
  2476. // rounding biases in column/row passes, see stbi__idct_block for explanation.
  2477. __m128i bias_0 = _mm_set1_epi32(512);
  2478. __m128i bias_1 = _mm_set1_epi32(65536 + (128 << 17));
  2479. // load
  2480. row0 = _mm_load_si128((const __m128i*)(data + 0 * 8));
  2481. row1 = _mm_load_si128((const __m128i*)(data + 1 * 8));
  2482. row2 = _mm_load_si128((const __m128i*)(data + 2 * 8));
  2483. row3 = _mm_load_si128((const __m128i*)(data + 3 * 8));
  2484. row4 = _mm_load_si128((const __m128i*)(data + 4 * 8));
  2485. row5 = _mm_load_si128((const __m128i*)(data + 5 * 8));
  2486. row6 = _mm_load_si128((const __m128i*)(data + 6 * 8));
  2487. row7 = _mm_load_si128((const __m128i*)(data + 7 * 8));
  2488. // column pass
  2489. dct_pass(bias_0, 10);
  2490. {
  2491. // 16bit 8x8 transpose pass 1
  2492. dct_interleave16(row0, row4);
  2493. dct_interleave16(row1, row5);
  2494. dct_interleave16(row2, row6);
  2495. dct_interleave16(row3, row7);
  2496. // transpose pass 2
  2497. dct_interleave16(row0, row2);
  2498. dct_interleave16(row1, row3);
  2499. dct_interleave16(row4, row6);
  2500. dct_interleave16(row5, row7);
  2501. // transpose pass 3
  2502. dct_interleave16(row0, row1);
  2503. dct_interleave16(row2, row3);
  2504. dct_interleave16(row4, row5);
  2505. dct_interleave16(row6, row7);
  2506. }
  2507. // row pass
  2508. dct_pass(bias_1, 17);
  2509. {
  2510. // pack
  2511. __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7
  2512. __m128i p1 = _mm_packus_epi16(row2, row3);
  2513. __m128i p2 = _mm_packus_epi16(row4, row5);
  2514. __m128i p3 = _mm_packus_epi16(row6, row7);
  2515. // 8bit 8x8 transpose pass 1
  2516. dct_interleave8(p0, p2); // a0e0a1e1...
  2517. dct_interleave8(p1, p3); // c0g0c1g1...
  2518. // transpose pass 2
  2519. dct_interleave8(p0, p1); // a0c0e0g0...
  2520. dct_interleave8(p2, p3); // b0d0f0h0...
  2521. // transpose pass 3
  2522. dct_interleave8(p0, p2); // a0b0c0d0...
  2523. dct_interleave8(p1, p3); // a4b4c4d4...
  2524. // store
  2525. _mm_storel_epi64((__m128i*)out, p0);
  2526. out += out_stride;
  2527. _mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p0, 0x4e));
  2528. out += out_stride;
  2529. _mm_storel_epi64((__m128i*)out, p2);
  2530. out += out_stride;
  2531. _mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p2, 0x4e));
  2532. out += out_stride;
  2533. _mm_storel_epi64((__m128i*)out, p1);
  2534. out += out_stride;
  2535. _mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p1, 0x4e));
  2536. out += out_stride;
  2537. _mm_storel_epi64((__m128i*)out, p3);
  2538. out += out_stride;
  2539. _mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p3, 0x4e));
  2540. }
  2541. #undef dct_const
  2542. #undef dct_rot
  2543. #undef dct_widen
  2544. #undef dct_wadd
  2545. #undef dct_wsub
  2546. #undef dct_bfly32o
  2547. #undef dct_interleave8
  2548. #undef dct_interleave16
  2549. #undef dct_pass
  2550. }
  2551. #endif // STBI_SSE2
  2552. #ifdef STBI_NEON
  2553. // NEON integer IDCT. should produce bit-identical
  2554. // results to the generic C version.
  2555. static void stbi__idct_simd(stbi_uc* out, int out_stride, short data[64]) {
  2556. int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;
  2557. int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f));
  2558. int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f));
  2559. int16x4_t rot0_2 = vdup_n_s16(stbi__f2f(0.765366865f));
  2560. int16x4_t rot1_0 = vdup_n_s16(stbi__f2f(1.175875602f));
  2561. int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f));
  2562. int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f));
  2563. int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f));
  2564. int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f));
  2565. int16x4_t rot3_0 = vdup_n_s16(stbi__f2f(0.298631336f));
  2566. int16x4_t rot3_1 = vdup_n_s16(stbi__f2f(2.053119869f));
  2567. int16x4_t rot3_2 = vdup_n_s16(stbi__f2f(3.072711026f));
  2568. int16x4_t rot3_3 = vdup_n_s16(stbi__f2f(1.501321110f));
  2569. #define dct_long_mul(out, inq, coeff) \
  2570. int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
  2571. int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)
  2572. #define dct_long_mac(out, acc, inq, coeff) \
  2573. int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \
  2574. int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)
  2575. #define dct_widen(out, inq) \
  2576. int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
  2577. int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)
  2578. // wide add
  2579. #define dct_wadd(out, a, b) \
  2580. int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
  2581. int32x4_t out##_h = vaddq_s32(a##_h, b##_h)
  2582. // wide sub
  2583. #define dct_wsub(out, a, b) \
  2584. int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
  2585. int32x4_t out##_h = vsubq_s32(a##_h, b##_h)
  2586. // butterfly a/b, then shift using "shiftop" by "s" and pack
  2587. #define dct_bfly32o(out0, out1, a, b, shiftop, s) \
  2588. { \
  2589. dct_wadd(sum, a, b); \
  2590. dct_wsub(dif, a, b); \
  2591. out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
  2592. out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
  2593. }
  2594. #define dct_pass(shiftop, shift) \
  2595. { \
  2596. /* even part */ \
  2597. int16x8_t sum26 = vaddq_s16(row2, row6); \
  2598. dct_long_mul(p1e, sum26, rot0_0); \
  2599. dct_long_mac(t2e, p1e, row6, rot0_1); \
  2600. dct_long_mac(t3e, p1e, row2, rot0_2); \
  2601. int16x8_t sum04 = vaddq_s16(row0, row4); \
  2602. int16x8_t dif04 = vsubq_s16(row0, row4); \
  2603. dct_widen(t0e, sum04); \
  2604. dct_widen(t1e, dif04); \
  2605. dct_wadd(x0, t0e, t3e); \
  2606. dct_wsub(x3, t0e, t3e); \
  2607. dct_wadd(x1, t1e, t2e); \
  2608. dct_wsub(x2, t1e, t2e); \
  2609. /* odd part */ \
  2610. int16x8_t sum15 = vaddq_s16(row1, row5); \
  2611. int16x8_t sum17 = vaddq_s16(row1, row7); \
  2612. int16x8_t sum35 = vaddq_s16(row3, row5); \
  2613. int16x8_t sum37 = vaddq_s16(row3, row7); \
  2614. int16x8_t sumodd = vaddq_s16(sum17, sum35); \
  2615. dct_long_mul(p5o, sumodd, rot1_0); \
  2616. dct_long_mac(p1o, p5o, sum17, rot1_1); \
  2617. dct_long_mac(p2o, p5o, sum35, rot1_2); \
  2618. dct_long_mul(p3o, sum37, rot2_0); \
  2619. dct_long_mul(p4o, sum15, rot2_1); \
  2620. dct_wadd(sump13o, p1o, p3o); \
  2621. dct_wadd(sump24o, p2o, p4o); \
  2622. dct_wadd(sump23o, p2o, p3o); \
  2623. dct_wadd(sump14o, p1o, p4o); \
  2624. dct_long_mac(x4, sump13o, row7, rot3_0); \
  2625. dct_long_mac(x5, sump24o, row5, rot3_1); \
  2626. dct_long_mac(x6, sump23o, row3, rot3_2); \
  2627. dct_long_mac(x7, sump14o, row1, rot3_3); \
  2628. dct_bfly32o(row0, row7, x0, x7, shiftop, shift); \
  2629. dct_bfly32o(row1, row6, x1, x6, shiftop, shift); \
  2630. dct_bfly32o(row2, row5, x2, x5, shiftop, shift); \
  2631. dct_bfly32o(row3, row4, x3, x4, shiftop, shift); \
  2632. }
  2633. // load
  2634. row0 = vld1q_s16(data + 0 * 8);
  2635. row1 = vld1q_s16(data + 1 * 8);
  2636. row2 = vld1q_s16(data + 2 * 8);
  2637. row3 = vld1q_s16(data + 3 * 8);
  2638. row4 = vld1q_s16(data + 4 * 8);
  2639. row5 = vld1q_s16(data + 5 * 8);
  2640. row6 = vld1q_s16(data + 6 * 8);
  2641. row7 = vld1q_s16(data + 7 * 8);
  2642. // add DC bias
  2643. row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));
  2644. // column pass
  2645. dct_pass(vrshrn_n_s32, 10);
  2646. // 16bit 8x8 transpose
  2647. {
  2648. // these three map to a single VTRN.16, VTRN.32, and VSWP, respectively.
  2649. // whether compilers actually get this is another story, sadly.
  2650. #define dct_trn16(x, y) \
  2651. { \
  2652. int16x8x2_t t = vtrnq_s16(x, y); \
  2653. x = t.val[0]; \
  2654. y = t.val[1]; \
  2655. }
  2656. #define dct_trn32(x, y) \
  2657. { \
  2658. int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); \
  2659. x = vreinterpretq_s16_s32(t.val[0]); \
  2660. y = vreinterpretq_s16_s32(t.val[1]); \
  2661. }
  2662. #define dct_trn64(x, y) \
  2663. { \
  2664. int16x8_t x0 = x; \
  2665. int16x8_t y0 = y; \
  2666. x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); \
  2667. y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); \
  2668. }
  2669. // pass 1
  2670. dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6
  2671. dct_trn16(row2, row3);
  2672. dct_trn16(row4, row5);
  2673. dct_trn16(row6, row7);
  2674. // pass 2
  2675. dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4
  2676. dct_trn32(row1, row3);
  2677. dct_trn32(row4, row6);
  2678. dct_trn32(row5, row7);
  2679. // pass 3
  2680. dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0
  2681. dct_trn64(row1, row5);
  2682. dct_trn64(row2, row6);
  2683. dct_trn64(row3, row7);
  2684. #undef dct_trn16
  2685. #undef dct_trn32
  2686. #undef dct_trn64
  2687. }
  2688. // row pass
  2689. // vrshrn_n_s32 only supports shifts up to 16, we need
  2690. // 17. so do a non-rounding shift of 16 first then follow
  2691. // up with a rounding shift by 1.
  2692. dct_pass(vshrn_n_s32, 16);
  2693. {
  2694. // pack and round
  2695. uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
  2696. uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
  2697. uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
  2698. uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
  2699. uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
  2700. uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
  2701. uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
  2702. uint8x8_t p7 = vqrshrun_n_s16(row7, 1);
  2703. // again, these can translate into one instruction, but often don't.
  2704. #define dct_trn8_8(x, y) \
  2705. { \
  2706. uint8x8x2_t t = vtrn_u8(x, y); \
  2707. x = t.val[0]; \
  2708. y = t.val[1]; \
  2709. }
  2710. #define dct_trn8_16(x, y) \
  2711. { \
  2712. uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); \
  2713. x = vreinterpret_u8_u16(t.val[0]); \
  2714. y = vreinterpret_u8_u16(t.val[1]); \
  2715. }
  2716. #define dct_trn8_32(x, y) \
  2717. { \
  2718. uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); \
  2719. x = vreinterpret_u8_u32(t.val[0]); \
  2720. y = vreinterpret_u8_u32(t.val[1]); \
  2721. }
  2722. // sadly can't use interleaved stores here since we only write
  2723. // 8 bytes to each scan line!
  2724. // 8x8 8-bit transpose pass 1
  2725. dct_trn8_8(p0, p1);
  2726. dct_trn8_8(p2, p3);
  2727. dct_trn8_8(p4, p5);
  2728. dct_trn8_8(p6, p7);
  2729. // pass 2
  2730. dct_trn8_16(p0, p2);
  2731. dct_trn8_16(p1, p3);
  2732. dct_trn8_16(p4, p6);
  2733. dct_trn8_16(p5, p7);
  2734. // pass 3
  2735. dct_trn8_32(p0, p4);
  2736. dct_trn8_32(p1, p5);
  2737. dct_trn8_32(p2, p6);
  2738. dct_trn8_32(p3, p7);
  2739. // store
  2740. vst1_u8(out, p0);
  2741. out += out_stride;
  2742. vst1_u8(out, p1);
  2743. out += out_stride;
  2744. vst1_u8(out, p2);
  2745. out += out_stride;
  2746. vst1_u8(out, p3);
  2747. out += out_stride;
  2748. vst1_u8(out, p4);
  2749. out += out_stride;
  2750. vst1_u8(out, p5);
  2751. out += out_stride;
  2752. vst1_u8(out, p6);
  2753. out += out_stride;
  2754. vst1_u8(out, p7);
  2755. #undef dct_trn8_8
  2756. #undef dct_trn8_16
  2757. #undef dct_trn8_32
  2758. }
  2759. #undef dct_long_mul
  2760. #undef dct_long_mac
  2761. #undef dct_widen
  2762. #undef dct_wadd
  2763. #undef dct_wsub
  2764. #undef dct_bfly32o
  2765. #undef dct_pass
  2766. }
  2767. #endif // STBI_NEON
  2768. #define STBI__MARKER_none 0xff
  2769. // if there's a pending marker from the entropy stream, return that
  2770. // otherwise, fetch from the stream and get a marker. if there's no
  2771. // marker, return 0xff, which is never a valid marker value
  2772. static stbi_uc stbi__get_marker(stbi__jpeg* j) {
  2773. stbi_uc x;
  2774. if (j->marker != STBI__MARKER_none) {
  2775. x = j->marker;
  2776. j->marker = STBI__MARKER_none;
  2777. return x;
  2778. }
  2779. x = stbi__get8(j->s);
  2780. if (x != 0xff)
  2781. return STBI__MARKER_none;
  2782. while (x == 0xff)
  2783. x = stbi__get8(j->s); // consume repeated 0xff fill bytes
  2784. return x;
  2785. }
  2786. // in each scan, we'll have scan_n components, and the order
  2787. // of the components is specified by order[]
  2788. #define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)
  2789. // after a restart interval, stbi__jpeg_reset the entropy decoder and
  2790. // the dc prediction
  2791. static void stbi__jpeg_reset(stbi__jpeg* j) {
  2792. j->code_bits = 0;
  2793. j->code_buffer = 0;
  2794. j->nomore = 0;
  2795. j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred =
  2796. j->img_comp[3].dc_pred = 0;
  2797. j->marker = STBI__MARKER_none;
  2798. j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
  2799. j->eob_run = 0;
2800. // no more than 1<<31 MCUs if no restart_interval? that's plenty safe,
2801. // since we don't even allow 1<<30 pixels
  2802. }
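// For illustration: when a restart interval is in effect, the entropy stream is
// cut into chunks of restart_interval MCUs separated by RST0..RST7 markers;
// todo counts the MCUs down, and the scan loop checks STBI__RESTART(marker)
// before calling this reset again.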
  2803. static int stbi__parse_entropy_coded_data(stbi__jpeg* z) {
  2804. stbi__jpeg_reset(z);
  2805. if (!z->progressive) {
  2806. if (z->scan_n == 1) {
  2807. int i, j;
  2808. STBI_SIMD_ALIGN(short, data[64]);
  2809. int n = z->order[0];
  2810. // non-interleaved data, we just need to process one block at a time,
  2811. // in trivial scanline order
  2812. // number of blocks to do just depends on how many actual "pixels" this
  2813. // component has, independent of interleaved MCU blocking and such
  2814. int w = (z->img_comp[n].x + 7) >> 3;
  2815. int h = (z->img_comp[n].y + 7) >> 3;
  2816. for (j = 0; j < h; ++j) {
  2817. for (i = 0; i < w; ++i) {
  2818. int ha = z->img_comp[n].ha;
  2819. if (!stbi__jpeg_decode_block(
  2820. z, data, z->huff_dc + z->img_comp[n].hd,
  2821. z->huff_ac + ha, z->fast_ac[ha], n,
  2822. z->dequant[z->img_comp[n].tq]))
  2823. return 0;
  2824. z->idct_block_kernel(
  2825. z->img_comp[n].data + z->img_comp[n].w2 * j * 8 + i * 8,
  2826. z->img_comp[n].w2, data);
  2827. // every data block is an MCU, so countdown the restart interval
  2828. if (--z->todo <= 0) {
  2829. if (z->code_bits < 24)
  2830. stbi__grow_buffer_unsafe(z);
  2831. // if it's NOT a restart, then just bail, so we get corrupt data
  2832. // rather than no data
  2833. if (!STBI__RESTART(z->marker))
  2834. return 1;
  2835. stbi__jpeg_reset(z);
  2836. }
  2837. }
  2838. }
  2839. return 1;
  2840. } else { // interleaved
  2841. int i, j, k, x, y;
  2842. STBI_SIMD_ALIGN(short, data[64]);
  2843. for (j = 0; j < z->img_mcu_y; ++j) {
  2844. for (i = 0; i < z->img_mcu_x; ++i) {
  2845. // scan an interleaved mcu... process scan_n components in order
  2846. for (k = 0; k < z->scan_n; ++k) {
  2847. int n = z->order[k];
  2848. // scan out an mcu's worth of this component; that's just
  2849. // determined by the basic H and V specified for the component
  2850. for (y = 0; y < z->img_comp[n].v; ++y) {
  2851. for (x = 0; x < z->img_comp[n].h; ++x) {
  2852. int x2 = (i * z->img_comp[n].h + x) * 8;
  2853. int y2 = (j * z->img_comp[n].v + y) * 8;
  2854. int ha = z->img_comp[n].ha;
  2855. if (!stbi__jpeg_decode_block(
  2856. z, data, z->huff_dc + z->img_comp[n].hd,
  2857. z->huff_ac + ha, z->fast_ac[ha], n,
  2858. z->dequant[z->img_comp[n].tq]))
  2859. return 0;
  2860. z->idct_block_kernel(
  2861. z->img_comp[n].data + z->img_comp[n].w2 * y2 +
  2862. x2,
  2863. z->img_comp[n].w2, data);
  2864. }
  2865. }
  2866. }
  2867. // after all interleaved components, that's an interleaved MCU,
  2868. // so now count down the restart interval
  2869. if (--z->todo <= 0) {
  2870. if (z->code_bits < 24)
  2871. stbi__grow_buffer_unsafe(z);
  2872. if (!STBI__RESTART(z->marker))
  2873. return 1;
  2874. stbi__jpeg_reset(z);
  2875. }
  2876. }
  2877. }
  2878. return 1;
  2879. }
  2880. } else {
  2881. if (z->scan_n == 1) {
  2882. int i, j;
  2883. int n = z->order[0];
  2884. // non-interleaved data, we just need to process one block at a time,
  2885. // in trivial scanline order
  2886. // number of blocks to do just depends on how many actual "pixels" this
  2887. // component has, independent of interleaved MCU blocking and such
  2888. int w = (z->img_comp[n].x + 7) >> 3;
  2889. int h = (z->img_comp[n].y + 7) >> 3;
  2890. for (j = 0; j < h; ++j) {
  2891. for (i = 0; i < w; ++i) {
  2892. short* data = z->img_comp[n].coeff +
  2893. 64 * (i + j * z->img_comp[n].coeff_w);
  2894. if (z->spec_start == 0) {
  2895. if (!stbi__jpeg_decode_block_prog_dc(
  2896. z, data, &z->huff_dc[z->img_comp[n].hd], n))
  2897. return 0;
  2898. } else {
  2899. int ha = z->img_comp[n].ha;
  2900. if (!stbi__jpeg_decode_block_prog_ac(
  2901. z, data, &z->huff_ac[ha], z->fast_ac[ha]))
  2902. return 0;
  2903. }
  2904. // every data block is an MCU, so countdown the restart interval
  2905. if (--z->todo <= 0) {
  2906. if (z->code_bits < 24)
  2907. stbi__grow_buffer_unsafe(z);
  2908. if (!STBI__RESTART(z->marker))
  2909. return 1;
  2910. stbi__jpeg_reset(z);
  2911. }
  2912. }
  2913. }
  2914. return 1;
  2915. } else { // interleaved
  2916. int i, j, k, x, y;
  2917. for (j = 0; j < z->img_mcu_y; ++j) {
  2918. for (i = 0; i < z->img_mcu_x; ++i) {
  2919. // scan an interleaved mcu... process scan_n components in order
  2920. for (k = 0; k < z->scan_n; ++k) {
  2921. int n = z->order[k];
  2922. // scan out an mcu's worth of this component; that's just
  2923. // determined by the basic H and V specified for the component
  2924. for (y = 0; y < z->img_comp[n].v; ++y) {
  2925. for (x = 0; x < z->img_comp[n].h; ++x) {
  2926. int x2 = (i * z->img_comp[n].h + x);
  2927. int y2 = (j * z->img_comp[n].v + y);
  2928. short* data = z->img_comp[n].coeff +
  2929. 64 * (x2 + y2 * z->img_comp[n].coeff_w);
  2930. if (!stbi__jpeg_decode_block_prog_dc(
  2931. z, data, &z->huff_dc[z->img_comp[n].hd], n))
  2932. return 0;
  2933. }
  2934. }
  2935. }
  2936. // after all interleaved components, that's an interleaved MCU,
  2937. // so now count down the restart interval
  2938. if (--z->todo <= 0) {
  2939. if (z->code_bits < 24)
  2940. stbi__grow_buffer_unsafe(z);
  2941. if (!STBI__RESTART(z->marker))
  2942. return 1;
  2943. stbi__jpeg_reset(z);
  2944. }
  2945. }
  2946. }
  2947. return 1;
  2948. }
  2949. }
  2950. }
  2951. static void stbi__jpeg_dequantize(short* data, stbi__uint16* dequant) {
  2952. int i;
  2953. for (i = 0; i < 64; ++i)
  2954. data[i] *= dequant[i];
  2955. }
  2956. static void stbi__jpeg_finish(stbi__jpeg* z) {
  2957. if (z->progressive) {
  2958. // dequantize and idct the data
  2959. int i, j, n;
  2960. for (n = 0; n < z->s->img_n; ++n) {
  2961. int w = (z->img_comp[n].x + 7) >> 3;
  2962. int h = (z->img_comp[n].y + 7) >> 3;
  2963. for (j = 0; j < h; ++j) {
  2964. for (i = 0; i < w; ++i) {
  2965. short* data = z->img_comp[n].coeff +
  2966. 64 * (i + j * z->img_comp[n].coeff_w);
  2967. stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]);
  2968. z->idct_block_kernel(
  2969. z->img_comp[n].data + z->img_comp[n].w2 * j * 8 + i * 8,
  2970. z->img_comp[n].w2, data);
  2971. }
  2972. }
  2973. }
  2974. }
  2975. }
  2976. static int stbi__process_marker(stbi__jpeg* z, int m) {
  2977. int L;
  2978. switch (m) {
  2979. case STBI__MARKER_none: // no marker found
  2980. return stbi__err("expected marker", "Corrupt JPEG");
  2981. case 0xDD: // DRI - specify restart interval
  2982. if (stbi__get16be(z->s) != 4)
  2983. return stbi__err("bad DRI len", "Corrupt JPEG");
  2984. z->restart_interval = stbi__get16be(z->s);
  2985. return 1;
  2986. case 0xDB: // DQT - define quantization table
  2987. L = stbi__get16be(z->s) - 2;
  2988. while (L > 0) {
  2989. int q = stbi__get8(z->s);
  2990. int p = q >> 4, sixteen = (p != 0);
  2991. int t = q & 15, i;
  2992. if (p != 0 && p != 1)
  2993. return stbi__err("bad DQT type", "Corrupt JPEG");
  2994. if (t > 3)
  2995. return stbi__err("bad DQT table", "Corrupt JPEG");
  2996. for (i = 0; i < 64; ++i)
  2997. z->dequant[t][stbi__jpeg_dezigzag[i]] =
  2998. (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s));
  2999. L -= (sixteen ? 129 : 65);
  3000. }
  3001. return L == 0;
  3002. case 0xC4: // DHT - define huffman table
  3003. L = stbi__get16be(z->s) - 2;
  3004. while (L > 0) {
  3005. stbi_uc* v;
  3006. int sizes[16], i, n = 0;
  3007. int q = stbi__get8(z->s);
  3008. int tc = q >> 4;
  3009. int th = q & 15;
  3010. if (tc > 1 || th > 3)
  3011. return stbi__err("bad DHT header", "Corrupt JPEG");
  3012. for (i = 0; i < 16; ++i) {
  3013. sizes[i] = stbi__get8(z->s);
  3014. n += sizes[i];
  3015. }
  3016. L -= 17;
  3017. if (tc == 0) {
  3018. if (!stbi__build_huffman(z->huff_dc + th, sizes))
  3019. return 0;
  3020. v = z->huff_dc[th].values;
  3021. } else {
  3022. if (!stbi__build_huffman(z->huff_ac + th, sizes))
  3023. return 0;
  3024. v = z->huff_ac[th].values;
  3025. }
  3026. for (i = 0; i < n; ++i)
  3027. v[i] = stbi__get8(z->s);
  3028. if (tc != 0)
  3029. stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th);
  3030. L -= n;
  3031. }
  3032. return L == 0;
  3033. }
  3034. // check for comment block or APP blocks
  3035. if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) {
  3036. L = stbi__get16be(z->s);
  3037. if (L < 2) {
  3038. if (m == 0xFE)
  3039. return stbi__err("bad COM len", "Corrupt JPEG");
  3040. else
  3041. return stbi__err("bad APP len", "Corrupt JPEG");
  3042. }
  3043. L -= 2;
  3044. if (m == 0xE0 && L >= 5) { // JFIF APP0 segment
  3045. static const unsigned char tag[5] = {'J', 'F', 'I', 'F', '\0'};
  3046. int ok = 1;
  3047. int i;
  3048. for (i = 0; i < 5; ++i)
  3049. if (stbi__get8(z->s) != tag[i])
  3050. ok = 0;
  3051. L -= 5;
  3052. if (ok)
  3053. z->jfif = 1;
  3054. } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment
  3055. static const unsigned char tag[6] = {'A', 'd', 'o', 'b', 'e', '\0'};
  3056. int ok = 1;
  3057. int i;
  3058. for (i = 0; i < 6; ++i)
  3059. if (stbi__get8(z->s) != tag[i])
  3060. ok = 0;
  3061. L -= 6;
  3062. if (ok) {
  3063. stbi__get8(z->s); // version
  3064. stbi__get16be(z->s); // flags0
  3065. stbi__get16be(z->s); // flags1
  3066. z->app14_color_transform = stbi__get8(z->s); // color transform
  3067. L -= 6;
  3068. }
  3069. }
  3070. stbi__skip(z->s, L);
  3071. return 1;
  3072. }
  3073. return stbi__err("unknown marker", "Corrupt JPEG");
  3074. }
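// For reference (illustrative): every segment parsed above starts with a
// big-endian 16-bit length that counts the two length bytes themselves, so a
// DRI segment is FF DD 00 04 <hi> <lo>, and an 8-bit DQT table consumes
// 1 (Pq/Tq) + 64 entries = 65 payload bytes, matching the L -= 65 accounting.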
  3075. // after we see SOS
  3076. static int stbi__process_scan_header(stbi__jpeg* z) {
  3077. int i;
  3078. int Ls = stbi__get16be(z->s);
  3079. z->scan_n = stbi__get8(z->s);
  3080. if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int)z->s->img_n)
  3081. return stbi__err("bad SOS component count", "Corrupt JPEG");
  3082. if (Ls != 6 + 2 * z->scan_n)
  3083. return stbi__err("bad SOS len", "Corrupt JPEG");
  3084. for (i = 0; i < z->scan_n; ++i) {
  3085. int id = stbi__get8(z->s), which;
  3086. int q = stbi__get8(z->s);
  3087. for (which = 0; which < z->s->img_n; ++which)
  3088. if (z->img_comp[which].id == id)
  3089. break;
  3090. if (which == z->s->img_n)
  3091. return 0; // no match
  3092. z->img_comp[which].hd = q >> 4;
  3093. if (z->img_comp[which].hd > 3)
  3094. return stbi__err("bad DC huff", "Corrupt JPEG");
  3095. z->img_comp[which].ha = q & 15;
  3096. if (z->img_comp[which].ha > 3)
  3097. return stbi__err("bad AC huff", "Corrupt JPEG");
  3098. z->order[i] = which;
  3099. }
  3100. {
  3101. int aa;
  3102. z->spec_start = stbi__get8(z->s);
  3103. z->spec_end = stbi__get8(z->s); // should be 63, but might be 0
  3104. aa = stbi__get8(z->s);
  3105. z->succ_high = (aa >> 4);
  3106. z->succ_low = (aa & 15);
  3107. if (z->progressive) {
  3108. if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end ||
  3109. z->succ_high > 13 || z->succ_low > 13)
  3110. return stbi__err("bad SOS", "Corrupt JPEG");
  3111. } else {
  3112. if (z->spec_start != 0)
  3113. return stbi__err("bad SOS", "Corrupt JPEG");
  3114. if (z->succ_high != 0 || z->succ_low != 0)
  3115. return stbi__err("bad SOS", "Corrupt JPEG");
  3116. z->spec_end = 63;
  3117. }
  3118. }
  3119. return 1;
  3120. }
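// For illustration: in a typical baseline 3-component scan the SOS payload is
// Ls = 12, Ns = 3, then an (id, Td<<4 | Ta) pair per component, followed by
// Ss = 0, Se = 63 and Ah<<4 | Al = 0, which land in scan_n, order[],
// spec_start/spec_end and succ_high/succ_low above.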
  3121. static int stbi__free_jpeg_components(stbi__jpeg* z, int ncomp, int why) {
  3122. int i;
  3123. for (i = 0; i < ncomp; ++i) {
  3124. if (z->img_comp[i].raw_data) {
  3125. STBI_FREE(z->img_comp[i].raw_data);
  3126. z->img_comp[i].raw_data = NULL;
  3127. z->img_comp[i].data = NULL;
  3128. }
  3129. if (z->img_comp[i].raw_coeff) {
  3130. STBI_FREE(z->img_comp[i].raw_coeff);
  3131. z->img_comp[i].raw_coeff = 0;
  3132. z->img_comp[i].coeff = 0;
  3133. }
  3134. if (z->img_comp[i].linebuf) {
  3135. STBI_FREE(z->img_comp[i].linebuf);
  3136. z->img_comp[i].linebuf = NULL;
  3137. }
  3138. }
  3139. return why;
  3140. }
  3141. static int stbi__process_frame_header(stbi__jpeg* z, int scan) {
  3142. stbi__context* s = z->s;
  3143. int Lf, p, i, q, h_max = 1, v_max = 1, c;
  3144. Lf = stbi__get16be(s);
  3145. if (Lf < 11)
  3146. return stbi__err("bad SOF len", "Corrupt JPEG"); // JPEG
  3147. p = stbi__get8(s);
  3148. if (p != 8)
  3149. return stbi__err(
  3150. "only 8-bit",
  3151. "JPEG format not supported: 8-bit only"); // JPEG baseline
  3152. s->img_y = stbi__get16be(s);
  3153. if (s->img_y == 0)
  3154. return stbi__err(
  3155. "no header height",
  3156. "JPEG format not supported: delayed height"); // Legal, but we don't
  3157. // handle it--but neither
  3158. // does IJG
  3159. s->img_x = stbi__get16be(s);
  3160. if (s->img_x == 0)
  3161. return stbi__err("0 width", "Corrupt JPEG"); // JPEG requires
  3162. if (s->img_y > STBI_MAX_DIMENSIONS)
  3163. return stbi__err("too large", "Very large image (corrupt?)");
  3164. if (s->img_x > STBI_MAX_DIMENSIONS)
  3165. return stbi__err("too large", "Very large image (corrupt?)");
  3166. c = stbi__get8(s);
  3167. if (c != 3 && c != 1 && c != 4)
  3168. return stbi__err("bad component count", "Corrupt JPEG");
  3169. s->img_n = c;
  3170. for (i = 0; i < c; ++i) {
  3171. z->img_comp[i].data = NULL;
  3172. z->img_comp[i].linebuf = NULL;
  3173. }
  3174. if (Lf != 8 + 3 * s->img_n)
  3175. return stbi__err("bad SOF len", "Corrupt JPEG");
  3176. z->rgb = 0;
  3177. for (i = 0; i < s->img_n; ++i) {
  3178. static const unsigned char rgb[3] = {'R', 'G', 'B'};
  3179. z->img_comp[i].id = stbi__get8(s);
  3180. if (s->img_n == 3 && z->img_comp[i].id == rgb[i])
  3181. ++z->rgb;
  3182. q = stbi__get8(s);
  3183. z->img_comp[i].h = (q >> 4);
  3184. if (!z->img_comp[i].h || z->img_comp[i].h > 4)
  3185. return stbi__err("bad H", "Corrupt JPEG");
  3186. z->img_comp[i].v = q & 15;
  3187. if (!z->img_comp[i].v || z->img_comp[i].v > 4)
  3188. return stbi__err("bad V", "Corrupt JPEG");
  3189. z->img_comp[i].tq = stbi__get8(s);
  3190. if (z->img_comp[i].tq > 3)
  3191. return stbi__err("bad TQ", "Corrupt JPEG");
  3192. }
  3193. if (scan != STBI__SCAN_load)
  3194. return 1;
  3195. if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0))
  3196. return stbi__err("too large", "Image too large to decode");
  3197. for (i = 0; i < s->img_n; ++i) {
  3198. if (z->img_comp[i].h > h_max)
  3199. h_max = z->img_comp[i].h;
  3200. if (z->img_comp[i].v > v_max)
  3201. v_max = z->img_comp[i].v;
  3202. }
  3203. // check that plane subsampling factors are integer ratios; our resamplers can't
  3204. // deal with fractional ratios and I've never seen a non-corrupted JPEG file
  3205. // actually use them
  3206. for (i = 0; i < s->img_n; ++i) {
  3207. if (h_max % z->img_comp[i].h != 0)
  3208. return stbi__err("bad H", "Corrupt JPEG");
  3209. if (v_max % z->img_comp[i].v != 0)
  3210. return stbi__err("bad V", "Corrupt JPEG");
  3211. }
  3212. // compute interleaved mcu info
  3213. z->img_h_max = h_max;
  3214. z->img_v_max = v_max;
  3215. z->img_mcu_w = h_max * 8;
  3216. z->img_mcu_h = v_max * 8;
  3217. // these sizes can't be more than 17 bits
  3218. z->img_mcu_x = (s->img_x + z->img_mcu_w - 1) / z->img_mcu_w;
  3219. z->img_mcu_y = (s->img_y + z->img_mcu_h - 1) / z->img_mcu_h;
  3220. for (i = 0; i < s->img_n; ++i) {
  3221. // number of effective pixels (e.g. for non-interleaved MCU)
  3222. z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max - 1) / h_max;
  3223. z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max - 1) / v_max;
  3224. // to simplify generation, we'll allocate enough memory to decode
  3225. // the bogus oversized data from using interleaved MCUs and their
  3226. // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't
  3227. // discard the extra data until colorspace conversion
  3228. //
  3229. // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier)
  3230. // so these muls can't overflow with 32-bit ints (which we require)
  3231. z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
  3232. z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
  3233. z->img_comp[i].coeff = 0;
  3234. z->img_comp[i].raw_coeff = 0;
  3235. z->img_comp[i].linebuf = NULL;
  3236. z->img_comp[i].raw_data =
  3237. stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15);
  3238. if (z->img_comp[i].raw_data == NULL)
  3239. return stbi__free_jpeg_components(
  3240. z, i + 1, stbi__err("outofmem", "Out of memory"));
  3241. // align blocks for idct using mmx/sse
  3242. z->img_comp[i].data = (stbi_uc*)(((size_t)z->img_comp[i].raw_data + 15) & ~15);
  3243. if (z->progressive) {
  3244. // w2, h2 are multiples of 8 (see above)
  3245. z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8;
  3246. z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8;
  3247. z->img_comp[i].raw_coeff = stbi__malloc_mad3(
  3248. z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15);
  3249. if (z->img_comp[i].raw_coeff == NULL)
  3250. return stbi__free_jpeg_components(
  3251. z, i + 1, stbi__err("outofmem", "Out of memory"));
  3252. z->img_comp[i].coeff =
  3253. (short*)(((size_t)z->img_comp[i].raw_coeff + 15) & ~15);
  3254. }
  3255. }
  3256. return 1;
  3257. }
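// Worked example (illustrative): a 33x17 image with 2x2 luma and 1x1 chroma
// sampling gives h_max = v_max = 2, a 16x16 interleaved MCU, img_mcu_x = 3 and
// img_mcu_y = 2. The luma plane then has x = 33, y = 17 but is allocated
// w2 = 48, h2 = 32; each chroma plane has x = 17, y = 9 with w2 = 24, h2 = 16.
// The extra rows and columns are the interleaved-MCU padding described above.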
  3258. // use comparisons since in some cases we handle more than one case (e.g. SOF)
  3259. #define stbi__DNL(x) ((x) == 0xdc)
  3260. #define stbi__SOI(x) ((x) == 0xd8)
  3261. #define stbi__EOI(x) ((x) == 0xd9)
  3262. #define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2)
  3263. #define stbi__SOS(x) ((x) == 0xda)
  3264. #define stbi__SOF_progressive(x) ((x) == 0xc2)
  3265. static int stbi__decode_jpeg_header(stbi__jpeg* z, int scan) {
  3266. int m;
  3267. z->jfif = 0;
  3268. z->app14_color_transform = -1; // valid values are 0,1,2
  3269. z->marker = STBI__MARKER_none; // initialize cached marker to empty
  3270. m = stbi__get_marker(z);
  3271. if (!stbi__SOI(m))
  3272. return stbi__err("no SOI", "Corrupt JPEG");
  3273. if (scan == STBI__SCAN_type)
  3274. return 1;
  3275. m = stbi__get_marker(z);
  3276. while (!stbi__SOF(m)) {
  3277. if (!stbi__process_marker(z, m))
  3278. return 0;
  3279. m = stbi__get_marker(z);
  3280. while (m == STBI__MARKER_none) {
  3281. // some files have extra padding after their blocks, so ok, we'll scan
  3282. if (stbi__at_eof(z->s))
  3283. return stbi__err("no SOF", "Corrupt JPEG");
  3284. m = stbi__get_marker(z);
  3285. }
  3286. }
  3287. z->progressive = stbi__SOF_progressive(m);
  3288. if (!stbi__process_frame_header(z, scan))
  3289. return 0;
  3290. return 1;
  3291. }
  3292. // decode image to YCbCr format
  3293. static int stbi__decode_jpeg_image(stbi__jpeg* j) {
  3294. int m;
  3295. for (m = 0; m < 4; m++) {
  3296. j->img_comp[m].raw_data = NULL;
  3297. j->img_comp[m].raw_coeff = NULL;
  3298. }
  3299. j->restart_interval = 0;
  3300. if (!stbi__decode_jpeg_header(j, STBI__SCAN_load))
  3301. return 0;
  3302. m = stbi__get_marker(j);
  3303. while (!stbi__EOI(m)) {
  3304. if (stbi__SOS(m)) {
  3305. if (!stbi__process_scan_header(j))
  3306. return 0;
  3307. if (!stbi__parse_entropy_coded_data(j))
  3308. return 0;
  3309. if (j->marker == STBI__MARKER_none) {
  3310. // handle 0s at the end of image data from IP Kamera 9060
  3311. while (!stbi__at_eof(j->s)) {
  3312. int x = stbi__get8(j->s);
  3313. if (x == 255) {
  3314. j->marker = stbi__get8(j->s);
  3315. break;
  3316. }
  3317. }
  3318. // if we reach eof without hitting a marker, stbi__get_marker() below
  3319. // will fail and we'll eventually return 0
  3320. }
  3321. } else if (stbi__DNL(m)) {
  3322. int Ld = stbi__get16be(j->s);
  3323. stbi__uint32 NL = stbi__get16be(j->s);
  3324. if (Ld != 4)
  3325. return stbi__err("bad DNL len", "Corrupt JPEG");
  3326. if (NL != j->s->img_y)
  3327. return stbi__err("bad DNL height", "Corrupt JPEG");
  3328. } else {
  3329. if (!stbi__process_marker(j, m))
  3330. return 0;
  3331. }
  3332. m = stbi__get_marker(j);
  3333. }
  3334. if (j->progressive)
  3335. stbi__jpeg_finish(j);
  3336. return 1;
  3337. }
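// For illustration: a typical baseline stream seen by the two functions above
// is SOI, APP0 (JFIF), DQT, SOF0, DHT (up to four tables), SOS, entropy-coded
// data, EOI; stbi__decode_jpeg_header consumes everything through the SOF, and
// the loop above handles each SOS plus any stray markers until EOI.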
  3338. // static jfif-centered resampling (across block boundaries)
  3339. typedef stbi_uc* (*resample_row_func)(
  3340. stbi_uc* out, stbi_uc* in0, stbi_uc* in1, int w, int hs);
  3341. #define stbi__div4(x) ((stbi_uc)((x) >> 2))
  3342. static stbi_uc* resample_row_1(
  3343. stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs) {
  3344. STBI_NOTUSED(out);
  3345. STBI_NOTUSED(in_far);
  3346. STBI_NOTUSED(w);
  3347. STBI_NOTUSED(hs);
  3348. return in_near;
  3349. }
  3350. static stbi_uc* stbi__resample_row_v_2(
  3351. stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs) {
  3352. // need to generate two samples vertically for every one in input
  3353. int i;
  3354. STBI_NOTUSED(hs);
  3355. for (i = 0; i < w; ++i)
  3356. out[i] = stbi__div4(3 * in_near[i] + in_far[i] + 2);
  3357. return out;
  3358. }
  3359. static stbi_uc* stbi__resample_row_h_2(
  3360. stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs) {
  3361. // need to generate two samples horizontally for every one in input
  3362. int i;
  3363. stbi_uc* input = in_near;
  3364. if (w == 1) {
  3365. // if only one sample, can't do any interpolation
  3366. out[0] = out[1] = input[0];
  3367. return out;
  3368. }
  3369. out[0] = input[0];
  3370. out[1] = stbi__div4(input[0] * 3 + input[1] + 2);
  3371. for (i = 1; i < w - 1; ++i) {
  3372. int n = 3 * input[i] + 2;
  3373. out[i * 2 + 0] = stbi__div4(n + input[i - 1]);
  3374. out[i * 2 + 1] = stbi__div4(n + input[i + 1]);
  3375. }
  3376. out[i * 2 + 0] = stbi__div4(input[w - 2] * 3 + input[w - 1] + 2);
  3377. out[i * 2 + 1] = input[w - 1];
  3378. STBI_NOTUSED(in_far);
  3379. STBI_NOTUSED(hs);
  3380. return out;
  3381. }
  3382. #define stbi__div16(x) ((stbi_uc)((x) >> 4))
  3383. static stbi_uc* stbi__resample_row_hv_2(
  3384. stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs) {
  3385. // need to generate 2x2 samples for every one in input
  3386. int i, t0, t1;
  3387. if (w == 1) {
  3388. out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2);
  3389. return out;
  3390. }
  3391. t1 = 3 * in_near[0] + in_far[0];
  3392. out[0] = stbi__div4(t1 + 2);
  3393. for (i = 1; i < w; ++i) {
  3394. t0 = t1;
  3395. t1 = 3 * in_near[i] + in_far[i];
  3396. out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8);
  3397. out[i * 2] = stbi__div16(3 * t1 + t0 + 8);
  3398. }
  3399. out[w * 2 - 1] = stbi__div4(t1 + 2);
  3400. STBI_NOTUSED(hs);
  3401. return out;
  3402. }
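// For illustration: expanding the arithmetic, each output pixel is a 9:3:3:1
// weighted average of the 2x2 nearest input samples, e.g.
//    out[2i] = (9*near[i] + 3*near[i-1] + 3*far[i] + far[i-1] + 8) / 16,
// which is the JFIF-centered bilinear 2x upsample referred to by the comment
// at the top of this resampling section.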
  3403. #if defined(STBI_SSE2) || defined(STBI_NEON)
  3404. static stbi_uc* stbi__resample_row_hv_2_simd(
  3405. stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs) {
  3406. // need to generate 2x2 samples for every one in input
  3407. int i = 0, t0, t1;
  3408. if (w == 1) {
  3409. out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2);
  3410. return out;
  3411. }
  3412. t1 = 3 * in_near[0] + in_far[0];
  3413. // process groups of 8 pixels for as long as we can.
  3414. // note we can't handle the last pixel in a row in this loop
  3415. // because we need to handle the filter boundary conditions.
  3416. for (; i < ((w - 1) & ~7); i += 8) {
  3417. #if defined(STBI_SSE2)
  3418. // load and perform the vertical filtering pass
  3419. // this uses 3*x + y = 4*x + (y - x)
  3420. __m128i zero = _mm_setzero_si128();
  3421. __m128i farb = _mm_loadl_epi64((__m128i*)(in_far + i));
  3422. __m128i nearb = _mm_loadl_epi64((__m128i*)(in_near + i));
  3423. __m128i farw = _mm_unpacklo_epi8(farb, zero);
  3424. __m128i nearw = _mm_unpacklo_epi8(nearb, zero);
  3425. __m128i diff = _mm_sub_epi16(farw, nearw);
  3426. __m128i nears = _mm_slli_epi16(nearw, 2);
  3427. __m128i curr = _mm_add_epi16(nears, diff); // current row
3428. // horizontal filter works the same based on shifted versions of the current
  3429. // row. "prev" is current row shifted right by 1 pixel; we need to
  3430. // insert the previous pixel value (from t1).
  3431. // "next" is current row shifted left by 1 pixel, with first pixel
  3432. // of next block of 8 pixels added in.
  3433. __m128i prv0 = _mm_slli_si128(curr, 2);
  3434. __m128i nxt0 = _mm_srli_si128(curr, 2);
  3435. __m128i prev = _mm_insert_epi16(prv0, t1, 0);
  3436. __m128i next = _mm_insert_epi16(nxt0, 3 * in_near[i + 8] + in_far[i + 8], 7);
  3437. // horizontal filter, polyphase implementation since it's convenient:
  3438. // even pixels = 3*cur + prev = cur*4 + (prev - cur)
  3439. // odd pixels = 3*cur + next = cur*4 + (next - cur)
  3440. // note the shared term.
  3441. __m128i bias = _mm_set1_epi16(8);
  3442. __m128i curs = _mm_slli_epi16(curr, 2);
  3443. __m128i prvd = _mm_sub_epi16(prev, curr);
  3444. __m128i nxtd = _mm_sub_epi16(next, curr);
  3445. __m128i curb = _mm_add_epi16(curs, bias);
  3446. __m128i even = _mm_add_epi16(prvd, curb);
  3447. __m128i odd = _mm_add_epi16(nxtd, curb);
  3448. // interleave even and odd pixels, then undo scaling.
  3449. __m128i int0 = _mm_unpacklo_epi16(even, odd);
  3450. __m128i int1 = _mm_unpackhi_epi16(even, odd);
  3451. __m128i de0 = _mm_srli_epi16(int0, 4);
  3452. __m128i de1 = _mm_srli_epi16(int1, 4);
  3453. // pack and write output
  3454. __m128i outv = _mm_packus_epi16(de0, de1);
  3455. _mm_storeu_si128((__m128i*)(out + i * 2), outv);
  3456. #elif defined(STBI_NEON)
  3457. // load and perform the vertical filtering pass
  3458. // this uses 3*x + y = 4*x + (y - x)
  3459. uint8x8_t farb = vld1_u8(in_far + i);
  3460. uint8x8_t nearb = vld1_u8(in_near + i);
  3461. int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb));
  3462. int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2));
  3463. int16x8_t curr = vaddq_s16(nears, diff); // current row
3464. // horizontal filter works the same based on shifted versions of the current
  3465. // row. "prev" is current row shifted right by 1 pixel; we need to
  3466. // insert the previous pixel value (from t1).
  3467. // "next" is current row shifted left by 1 pixel, with first pixel
  3468. // of next block of 8 pixels added in.
  3469. int16x8_t prv0 = vextq_s16(curr, curr, 7);
  3470. int16x8_t nxt0 = vextq_s16(curr, curr, 1);
  3471. int16x8_t prev = vsetq_lane_s16(t1, prv0, 0);
  3472. int16x8_t next = vsetq_lane_s16(3 * in_near[i + 8] + in_far[i + 8], nxt0, 7);
  3473. // horizontal filter, polyphase implementation since it's convenient:
  3474. // even pixels = 3*cur + prev = cur*4 + (prev - cur)
  3475. // odd pixels = 3*cur + next = cur*4 + (next - cur)
  3476. // note the shared term.
  3477. int16x8_t curs = vshlq_n_s16(curr, 2);
  3478. int16x8_t prvd = vsubq_s16(prev, curr);
  3479. int16x8_t nxtd = vsubq_s16(next, curr);
  3480. int16x8_t even = vaddq_s16(curs, prvd);
  3481. int16x8_t odd = vaddq_s16(curs, nxtd);
  3482. // undo scaling and round, then store with even/odd phases interleaved
  3483. uint8x8x2_t o;
  3484. o.val[0] = vqrshrun_n_s16(even, 4);
  3485. o.val[1] = vqrshrun_n_s16(odd, 4);
  3486. vst2_u8(out + i * 2, o);
  3487. #endif
  3488. // "previous" value for next iter
  3489. t1 = 3 * in_near[i + 7] + in_far[i + 7];
  3490. }
  3491. t0 = t1;
  3492. t1 = 3 * in_near[i] + in_far[i];
  3493. out[i * 2] = stbi__div16(3 * t1 + t0 + 8);
  3494. for (++i; i < w; ++i) {
  3495. t0 = t1;
  3496. t1 = 3 * in_near[i] + in_far[i];
  3497. out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8);
  3498. out[i * 2] = stbi__div16(3 * t1 + t0 + 8);
  3499. }
  3500. out[w * 2 - 1] = stbi__div4(t1 + 2);
  3501. STBI_NOTUSED(hs);
  3502. return out;
  3503. }
  3504. #endif
  3505. static stbi_uc* stbi__resample_row_generic(
  3506. stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs) {
  3507. // resample with nearest-neighbor
  3508. int i, j;
  3509. STBI_NOTUSED(in_far);
  3510. for (i = 0; i < w; ++i)
  3511. for (j = 0; j < hs; ++j)
  3512. out[i * hs + j] = in_near[i];
  3513. return out;
  3514. }
  3515. // this is a reduced-precision calculation of YCbCr-to-RGB introduced
  3516. // to make sure the code produces the same results in both SIMD and scalar
  3517. #define stbi__float2fixed(x) (((int)((x)*4096.0f + 0.5f)) << 8)
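// note: stbi__float2fixed(c) is c in 20-bit fixed point (c*4096 rounded, then
// shifted left 8 more bits), and y is promoted with (y[i] << 20) plus a
// half-unit bias of (1 << 19), so the final >> 20 rounds to nearest. e.g.
// stbi__float2fixed(1.40200f) == 5743 << 8 == 1470208 ~= 1.402 * (1 << 20).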
  3518. static void stbi__YCbCr_to_RGB_row(
  3519. stbi_uc* out, const stbi_uc* y, const stbi_uc* pcb, const stbi_uc* pcr,
  3520. int count, int step) {
  3521. int i;
  3522. for (i = 0; i < count; ++i) {
  3523. int y_fixed = (y[i] << 20) + (1 << 19); // rounding
  3524. int r, g, b;
  3525. int cr = pcr[i] - 128;
  3526. int cb = pcb[i] - 128;
  3527. r = y_fixed + cr * stbi__float2fixed(1.40200f);
  3528. g = y_fixed + (cr * -stbi__float2fixed(0.71414f)) +
  3529. ((cb * -stbi__float2fixed(0.34414f)) & 0xffff0000);
  3530. b = y_fixed + cb * stbi__float2fixed(1.77200f);
  3531. r >>= 20;
  3532. g >>= 20;
  3533. b >>= 20;
  3534. if ((unsigned)r > 255) {
  3535. if (r < 0)
  3536. r = 0;
  3537. else
  3538. r = 255;
  3539. }
  3540. if ((unsigned)g > 255) {
  3541. if (g < 0)
  3542. g = 0;
  3543. else
  3544. g = 255;
  3545. }
  3546. if ((unsigned)b > 255) {
  3547. if (b < 0)
  3548. b = 0;
  3549. else
  3550. b = 255;
  3551. }
  3552. out[0] = (stbi_uc)r;
  3553. out[1] = (stbi_uc)g;
  3554. out[2] = (stbi_uc)b;
  3555. out[3] = 255;
  3556. out += step;
  3557. }
  3558. }
  3559. #if defined(STBI_SSE2) || defined(STBI_NEON)
  3560. static void stbi__YCbCr_to_RGB_simd(
  3561. stbi_uc* out, stbi_uc const* y, stbi_uc const* pcb, stbi_uc const* pcr,
  3562. int count, int step) {
  3563. int i = 0;
  3564. #ifdef STBI_SSE2
  3565. // step == 3 is pretty ugly on the final interleave, and i'm not convinced
  3566. // it's useful in practice (you wouldn't use it for textures, for example).
  3567. // so just accelerate step == 4 case.
  3568. if (step == 4) {
  3569. // this is a fairly straightforward implementation and not super-optimized.
  3570. __m128i signflip = _mm_set1_epi8(-0x80);
  3571. __m128i cr_const0 = _mm_set1_epi16((short)(1.40200f * 4096.0f + 0.5f));
  3572. __m128i cr_const1 = _mm_set1_epi16(-(short)(0.71414f * 4096.0f + 0.5f));
  3573. __m128i cb_const0 = _mm_set1_epi16(-(short)(0.34414f * 4096.0f + 0.5f));
  3574. __m128i cb_const1 = _mm_set1_epi16((short)(1.77200f * 4096.0f + 0.5f));
  3575. __m128i y_bias = _mm_set1_epi8((char)(unsigned char)128);
  3576. __m128i xw = _mm_set1_epi16(255); // alpha channel
  3577. for (; i + 7 < count; i += 8) {
  3578. // load
  3579. __m128i y_bytes = _mm_loadl_epi64((__m128i*)(y + i));
  3580. __m128i cr_bytes = _mm_loadl_epi64((__m128i*)(pcr + i));
  3581. __m128i cb_bytes = _mm_loadl_epi64((__m128i*)(pcb + i));
  3582. __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128
  3583. __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128
  3584. // unpack to short (and left-shift cr, cb by 8)
  3585. __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes);
  3586. __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased);
  3587. __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased);
  3588. // color transform
  3589. __m128i yws = _mm_srli_epi16(yw, 4);
  3590. __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw);
  3591. __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw);
  3592. __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1);
  3593. __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1);
  3594. __m128i rws = _mm_add_epi16(cr0, yws);
  3595. __m128i gwt = _mm_add_epi16(cb0, yws);
  3596. __m128i bws = _mm_add_epi16(yws, cb1);
  3597. __m128i gws = _mm_add_epi16(gwt, cr1);
  3598. // descale
  3599. __m128i rw = _mm_srai_epi16(rws, 4);
  3600. __m128i bw = _mm_srai_epi16(bws, 4);
  3601. __m128i gw = _mm_srai_epi16(gws, 4);
  3602. // back to byte, set up for transpose
  3603. __m128i brb = _mm_packus_epi16(rw, bw);
  3604. __m128i gxb = _mm_packus_epi16(gw, xw);
  3605. // transpose to interleave channels
  3606. __m128i t0 = _mm_unpacklo_epi8(brb, gxb);
  3607. __m128i t1 = _mm_unpackhi_epi8(brb, gxb);
  3608. __m128i o0 = _mm_unpacklo_epi16(t0, t1);
  3609. __m128i o1 = _mm_unpackhi_epi16(t0, t1);
  3610. // store
  3611. _mm_storeu_si128((__m128i*)(out + 0), o0);
  3612. _mm_storeu_si128((__m128i*)(out + 16), o1);
  3613. out += 32;
  3614. }
  3615. }
  3616. #endif
  3617. #ifdef STBI_NEON
  3618. // in this version, step=3 support would be easy to add. but is there demand?
  3619. if (step == 4) {
  3620. // this is a fairly straightforward implementation and not super-optimized.
  3621. uint8x8_t signflip = vdup_n_u8(0x80);
  3622. int16x8_t cr_const0 = vdupq_n_s16((short)(1.40200f * 4096.0f + 0.5f));
  3623. int16x8_t cr_const1 = vdupq_n_s16(-(short)(0.71414f * 4096.0f + 0.5f));
  3624. int16x8_t cb_const0 = vdupq_n_s16(-(short)(0.34414f * 4096.0f + 0.5f));
  3625. int16x8_t cb_const1 = vdupq_n_s16((short)(1.77200f * 4096.0f + 0.5f));
  3626. for (; i + 7 < count; i += 8) {
  3627. // load
  3628. uint8x8_t y_bytes = vld1_u8(y + i);
  3629. uint8x8_t cr_bytes = vld1_u8(pcr + i);
  3630. uint8x8_t cb_bytes = vld1_u8(pcb + i);
  3631. int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip));
  3632. int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip));
  3633. // expand to s16
  3634. int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4));
  3635. int16x8_t crw = vshll_n_s8(cr_biased, 7);
  3636. int16x8_t cbw = vshll_n_s8(cb_biased, 7);
  3637. // color transform
  3638. int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0);
  3639. int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0);
  3640. int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1);
  3641. int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1);
  3642. int16x8_t rws = vaddq_s16(yws, cr0);
  3643. int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1);
  3644. int16x8_t bws = vaddq_s16(yws, cb1);
  3645. // undo scaling, round, convert to byte
  3646. uint8x8x4_t o;
  3647. o.val[0] = vqrshrun_n_s16(rws, 4);
  3648. o.val[1] = vqrshrun_n_s16(gws, 4);
  3649. o.val[2] = vqrshrun_n_s16(bws, 4);
  3650. o.val[3] = vdup_n_u8(255);
  3651. // store, interleaving r/g/b/a
  3652. vst4_u8(out, o);
  3653. out += 8 * 4;
  3654. }
  3655. }
  3656. #endif
  3657. for (; i < count; ++i) {
  3658. int y_fixed = (y[i] << 20) + (1 << 19); // rounding
  3659. int r, g, b;
  3660. int cr = pcr[i] - 128;
  3661. int cb = pcb[i] - 128;
  3662. r = y_fixed + cr * stbi__float2fixed(1.40200f);
  3663. g = y_fixed + cr * -stbi__float2fixed(0.71414f) +
  3664. ((cb * -stbi__float2fixed(0.34414f)) & 0xffff0000);
  3665. b = y_fixed + cb * stbi__float2fixed(1.77200f);
  3666. r >>= 20;
  3667. g >>= 20;
  3668. b >>= 20;
  3669. if ((unsigned)r > 255) {
  3670. if (r < 0)
  3671. r = 0;
  3672. else
  3673. r = 255;
  3674. }
  3675. if ((unsigned)g > 255) {
  3676. if (g < 0)
  3677. g = 0;
  3678. else
  3679. g = 255;
  3680. }
  3681. if ((unsigned)b > 255) {
  3682. if (b < 0)
  3683. b = 0;
  3684. else
  3685. b = 255;
  3686. }
  3687. out[0] = (stbi_uc)r;
  3688. out[1] = (stbi_uc)g;
  3689. out[2] = (stbi_uc)b;
  3690. out[3] = 255;
  3691. out += step;
  3692. }
  3693. }
  3694. #endif
  3695. // set up the kernels
  3696. static void stbi__setup_jpeg(stbi__jpeg* j) {
  3697. j->idct_block_kernel = stbi__idct_block;
  3698. j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row;
  3699. j->resample_row_hv_2_kernel = stbi__resample_row_hv_2;
  3700. #ifdef STBI_SSE2
  3701. if (stbi__sse2_available()) {
  3702. j->idct_block_kernel = stbi__idct_simd;
  3703. j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
  3704. j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
  3705. }
  3706. #endif
  3707. #ifdef STBI_NEON
  3708. j->idct_block_kernel = stbi__idct_simd;
  3709. j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
  3710. j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
  3711. #endif
  3712. }
  3713. // clean up the temporary component buffers
  3714. static void stbi__cleanup_jpeg(stbi__jpeg* j) {
  3715. stbi__free_jpeg_components(j, j->s->img_n, 0);
  3716. }
  3717. typedef struct {
  3718. resample_row_func resample;
  3719. stbi_uc *line0, *line1;
  3720. int hs, vs; // expansion factor in each axis
  3721. int w_lores; // horizontal pixels pre-expansion
  3722. int ystep; // how far through vertical expansion we are
  3723. int ypos; // which pre-expansion row we're on
  3724. } stbi__resample;
  3725. // fast 0..255 * 0..255 => 0..255 rounded multiplication
  3726. static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) {
  3727. unsigned int t = x * y + 128;
  3728. return (stbi_uc)((t + (t >> 8)) >> 8);
  3729. }
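// note: with t = x*y + 128, (t + (t >> 8)) >> 8 is an exact substitute for
// rounding x*y/255 when x,y are in 0..255 (Jim Blinn's trick); e.g. x = y = 255
// gives t = 65153 and (65153 + 254) >> 8 == 255, so full-scale stays full-scale.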
  3730. static stbi_uc* load_jpeg_image(
  3731. stbi__jpeg* z, int* out_x, int* out_y, int* comp, int req_comp) {
  3732. int n, decode_n, is_rgb;
  3733. z->s->img_n = 0; // make stbi__cleanup_jpeg safe
  3734. // validate req_comp
  3735. if (req_comp < 0 || req_comp > 4)
  3736. return stbi__errpuc("bad req_comp", "Internal error");
  3737. // load a jpeg image from whichever source, but leave in YCbCr format
  3738. if (!stbi__decode_jpeg_image(z)) {
  3739. stbi__cleanup_jpeg(z);
  3740. return NULL;
  3741. }
  3742. // determine actual number of components to generate
  3743. n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1;
  3744. is_rgb = z->s->img_n == 3 &&
  3745. (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif));
  3746. if (z->s->img_n == 3 && n < 3 && !is_rgb)
  3747. decode_n = 1;
  3748. else
  3749. decode_n = z->s->img_n;
  3750. // nothing to do if no components requested; check this now to avoid
  3751. // accessing uninitialized coutput[0] later
  3752. if (decode_n <= 0) {
  3753. stbi__cleanup_jpeg(z);
  3754. return NULL;
  3755. }
  3756. // resample and color-convert
  3757. {
  3758. int k;
  3759. unsigned int i, j;
  3760. stbi_uc* output;
  3761. stbi_uc* coutput[4] = {NULL, NULL, NULL, NULL};
  3762. stbi__resample res_comp[4];
  3763. for (k = 0; k < decode_n; ++k) {
  3764. stbi__resample* r = &res_comp[k];
  3765. // allocate line buffer big enough for upsampling off the edges
  3766. // with upsample factor of 4
  3767. z->img_comp[k].linebuf = (stbi_uc*)stbi__malloc(z->s->img_x + 3);
  3768. if (!z->img_comp[k].linebuf) {
  3769. stbi__cleanup_jpeg(z);
  3770. return stbi__errpuc("outofmem", "Out of memory");
  3771. }
  3772. r->hs = z->img_h_max / z->img_comp[k].h;
  3773. r->vs = z->img_v_max / z->img_comp[k].v;
  3774. r->ystep = r->vs >> 1;
  3775. r->w_lores = (z->s->img_x + r->hs - 1) / r->hs;
  3776. r->ypos = 0;
  3777. r->line0 = r->line1 = z->img_comp[k].data;
  3778. if (r->hs == 1 && r->vs == 1)
  3779. r->resample = resample_row_1;
  3780. else if (r->hs == 1 && r->vs == 2)
  3781. r->resample = stbi__resample_row_v_2;
  3782. else if (r->hs == 2 && r->vs == 1)
  3783. r->resample = stbi__resample_row_h_2;
  3784. else if (r->hs == 2 && r->vs == 2)
  3785. r->resample = z->resample_row_hv_2_kernel;
  3786. else
  3787. r->resample = stbi__resample_row_generic;
  3788. }
3789. // can't error after this, so this is safe
  3790. output = (stbi_uc*)stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1);
  3791. if (!output) {
  3792. stbi__cleanup_jpeg(z);
  3793. return stbi__errpuc("outofmem", "Out of memory");
  3794. }
  3795. // now go ahead and resample
  3796. for (j = 0; j < z->s->img_y; ++j) {
  3797. stbi_uc* out = output + n * z->s->img_x * j;
  3798. for (k = 0; k < decode_n; ++k) {
  3799. stbi__resample* r = &res_comp[k];
  3800. int y_bot = r->ystep >= (r->vs >> 1);
  3801. coutput[k] = r->resample(
  3802. z->img_comp[k].linebuf, y_bot ? r->line1 : r->line0,
  3803. y_bot ? r->line0 : r->line1, r->w_lores, r->hs);
  3804. if (++r->ystep >= r->vs) {
  3805. r->ystep = 0;
  3806. r->line0 = r->line1;
  3807. if (++r->ypos < z->img_comp[k].y)
  3808. r->line1 += z->img_comp[k].w2;
  3809. }
  3810. }
  3811. if (n >= 3) {
  3812. stbi_uc* y = coutput[0];
  3813. if (z->s->img_n == 3) {
  3814. if (is_rgb) {
  3815. for (i = 0; i < z->s->img_x; ++i) {
  3816. out[0] = y[i];
  3817. out[1] = coutput[1][i];
  3818. out[2] = coutput[2][i];
  3819. out[3] = 255;
  3820. out += n;
  3821. }
  3822. } else {
  3823. z->YCbCr_to_RGB_kernel(
  3824. out, y, coutput[1], coutput[2], z->s->img_x, n);
  3825. }
  3826. } else if (z->s->img_n == 4) {
  3827. if (z->app14_color_transform == 0) { // CMYK
  3828. for (i = 0; i < z->s->img_x; ++i) {
  3829. stbi_uc m = coutput[3][i];
  3830. out[0] = stbi__blinn_8x8(coutput[0][i], m);
  3831. out[1] = stbi__blinn_8x8(coutput[1][i], m);
  3832. out[2] = stbi__blinn_8x8(coutput[2][i], m);
  3833. out[3] = 255;
  3834. out += n;
  3835. }
  3836. } else if (z->app14_color_transform == 2) { // YCCK
  3837. z->YCbCr_to_RGB_kernel(
  3838. out, y, coutput[1], coutput[2], z->s->img_x, n);
  3839. for (i = 0; i < z->s->img_x; ++i) {
  3840. stbi_uc m = coutput[3][i];
  3841. out[0] = stbi__blinn_8x8(255 - out[0], m);
  3842. out[1] = stbi__blinn_8x8(255 - out[1], m);
  3843. out[2] = stbi__blinn_8x8(255 - out[2], m);
  3844. out += n;
  3845. }
  3846. } else { // YCbCr + alpha? Ignore the fourth channel for now
  3847. z->YCbCr_to_RGB_kernel(
  3848. out, y, coutput[1], coutput[2], z->s->img_x, n);
  3849. }
  3850. } else
  3851. for (i = 0; i < z->s->img_x; ++i) {
  3852. out[0] = out[1] = out[2] = y[i];
  3853. out[3] = 255; // not used if n==3
  3854. out += n;
  3855. }
  3856. } else {
  3857. if (is_rgb) {
  3858. if (n == 1)
  3859. for (i = 0; i < z->s->img_x; ++i)
  3860. *out++ = stbi__compute_y(
  3861. coutput[0][i], coutput[1][i], coutput[2][i]);
  3862. else {
  3863. for (i = 0; i < z->s->img_x; ++i, out += 2) {
  3864. out[0] = stbi__compute_y(
  3865. coutput[0][i], coutput[1][i], coutput[2][i]);
  3866. out[1] = 255;
  3867. }
  3868. }
  3869. } else if (z->s->img_n == 4 && z->app14_color_transform == 0) {
  3870. for (i = 0; i < z->s->img_x; ++i) {
  3871. stbi_uc m = coutput[3][i];
  3872. stbi_uc r = stbi__blinn_8x8(coutput[0][i], m);
  3873. stbi_uc g = stbi__blinn_8x8(coutput[1][i], m);
  3874. stbi_uc b = stbi__blinn_8x8(coutput[2][i], m);
  3875. out[0] = stbi__compute_y(r, g, b);
  3876. out[1] = 255;
  3877. out += n;
  3878. }
  3879. } else if (z->s->img_n == 4 && z->app14_color_transform == 2) {
  3880. for (i = 0; i < z->s->img_x; ++i) {
  3881. out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]);
  3882. out[1] = 255;
  3883. out += n;
  3884. }
  3885. } else {
  3886. stbi_uc* y = coutput[0];
  3887. if (n == 1)
  3888. for (i = 0; i < z->s->img_x; ++i)
  3889. out[i] = y[i];
  3890. else
  3891. for (i = 0; i < z->s->img_x; ++i) {
  3892. *out++ = y[i];
  3893. *out++ = 255;
  3894. }
  3895. }
  3896. }
  3897. }
  3898. stbi__cleanup_jpeg(z);
  3899. *out_x = z->s->img_x;
  3900. *out_y = z->s->img_y;
  3901. if (comp)
  3902. *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output
  3903. return output;
  3904. }
  3905. }
  3906. static void* stbi__jpeg_load(
  3907. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  3908. stbi__result_info* ri) {
  3909. unsigned char* result;
  3910. stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg));
  3911. if (!j)
  3912. return stbi__errpuc("outofmem", "Out of memory");
  3913. STBI_NOTUSED(ri);
  3914. j->s = s;
  3915. stbi__setup_jpeg(j);
  3916. result = load_jpeg_image(j, x, y, comp, req_comp);
  3917. STBI_FREE(j);
  3918. return result;
  3919. }
  3920. static int stbi__jpeg_test(stbi__context* s) {
  3921. int r;
  3922. stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg));
  3923. if (!j)
  3924. return stbi__err("outofmem", "Out of memory");
  3925. j->s = s;
  3926. stbi__setup_jpeg(j);
  3927. r = stbi__decode_jpeg_header(j, STBI__SCAN_type);
  3928. stbi__rewind(s);
  3929. STBI_FREE(j);
  3930. return r;
  3931. }
  3932. static int stbi__jpeg_info_raw(stbi__jpeg* j, int* x, int* y, int* comp) {
  3933. if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) {
  3934. stbi__rewind(j->s);
  3935. return 0;
  3936. }
  3937. if (x)
  3938. *x = j->s->img_x;
  3939. if (y)
  3940. *y = j->s->img_y;
  3941. if (comp)
  3942. *comp = j->s->img_n >= 3 ? 3 : 1;
  3943. return 1;
  3944. }
  3945. static int stbi__jpeg_info(stbi__context* s, int* x, int* y, int* comp) {
  3946. int result;
  3947. stbi__jpeg* j = (stbi__jpeg*)(stbi__malloc(sizeof(stbi__jpeg)));
  3948. if (!j)
  3949. return stbi__err("outofmem", "Out of memory");
  3950. j->s = s;
  3951. result = stbi__jpeg_info_raw(j, x, y, comp);
  3952. STBI_FREE(j);
  3953. return result;
  3954. }
  3955. #endif
  3956. // public domain zlib decode v0.2 Sean Barrett 2006-11-18
  3957. // simple implementation
  3958. // - all input must be provided in an upfront buffer
  3959. // - all output is written to a single output buffer (can malloc/realloc)
  3960. // performance
  3961. // - fast huffman
  3962. #ifndef STBI_NO_ZLIB
  3963. // fast-way is faster to check than jpeg huffman, but slow way is slower
  3964. #define STBI__ZFAST_BITS 9 // accelerate all cases in default tables
  3965. #define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1)
  3966. #define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet
  3967. // zlib-style huffman encoding
3968. // (jpeg packs bits from the left, zlib from the right, so they can't share code)
  3969. typedef struct {
  3970. stbi__uint16 fast[1 << STBI__ZFAST_BITS];
  3971. stbi__uint16 firstcode[16];
  3972. int maxcode[17];
  3973. stbi__uint16 firstsymbol[16];
  3974. stbi_uc size[STBI__ZNSYMS];
  3975. stbi__uint16 value[STBI__ZNSYMS];
  3976. } stbi__zhuffman;
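// note on the table layout: fast[] is indexed by the low STBI__ZFAST_BITS bits
// of the LSB-first bit buffer; a nonzero entry packs the code length in the top
// bits and the symbol in the low 9 bits, i.e. (length << 9) | symbol, which is
// what stbi__zbuild_huffman stores and stbi__zhuffman_decode unpacks. codes
// longer than STBI__ZFAST_BITS fall through to the firstcode/maxcode search.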
  3977. stbi_inline static int stbi__bitreverse16(int n) {
  3978. n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
  3979. n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
  3980. n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
  3981. n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
  3982. return n;
  3983. }
  3984. stbi_inline static int stbi__bit_reverse(int v, int bits) {
  3985. STBI_ASSERT(bits <= 16);
  3986. // to bit reverse n bits, reverse 16 and shift
  3987. // e.g. 11 bits, bit reverse and shift away 5
  3988. return stbi__bitreverse16(v) >> (16 - bits);
  3989. }
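// e.g. stbi__bit_reverse(0x6, 3) reverses the 3-bit code 110 into 011 (== 3):
// stbi__bitreverse16(0x0006) == 0x6000, and >> (16 - 3) keeps the top 3 bits.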
  3990. static int stbi__zbuild_huffman(stbi__zhuffman* z, const stbi_uc* sizelist, int num) {
  3991. int i, k = 0;
  3992. int code, next_code[16], sizes[17];
  3993. // DEFLATE spec for generating codes
  3994. memset(sizes, 0, sizeof(sizes));
  3995. memset(z->fast, 0, sizeof(z->fast));
  3996. for (i = 0; i < num; ++i)
  3997. ++sizes[sizelist[i]];
  3998. sizes[0] = 0;
  3999. for (i = 1; i < 16; ++i)
  4000. if (sizes[i] > (1 << i))
  4001. return stbi__err("bad sizes", "Corrupt PNG");
  4002. code = 0;
  4003. for (i = 1; i < 16; ++i) {
  4004. next_code[i] = code;
  4005. z->firstcode[i] = (stbi__uint16)code;
  4006. z->firstsymbol[i] = (stbi__uint16)k;
  4007. code = (code + sizes[i]);
  4008. if (sizes[i])
  4009. if (code - 1 >= (1 << i))
  4010. return stbi__err("bad codelengths", "Corrupt PNG");
  4011. z->maxcode[i] = code << (16 - i); // preshift for inner loop
  4012. code <<= 1;
  4013. k += sizes[i];
  4014. }
  4015. z->maxcode[16] = 0x10000; // sentinel
  4016. for (i = 0; i < num; ++i) {
  4017. int s = sizelist[i];
  4018. if (s) {
  4019. int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
  4020. stbi__uint16 fastv = (stbi__uint16)((s << 9) | i);
  4021. z->size[c] = (stbi_uc)s;
  4022. z->value[c] = (stbi__uint16)i;
  4023. if (s <= STBI__ZFAST_BITS) {
  4024. int j = stbi__bit_reverse(next_code[s], s);
  4025. while (j < (1 << STBI__ZFAST_BITS)) {
  4026. z->fast[j] = fastv;
  4027. j += (1 << s);
  4028. }
  4029. }
  4030. ++next_code[s];
  4031. }
  4032. }
  4033. return 1;
  4034. }
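// example (RFC 1951, section 3.2.2): for symbols A,B,C,D with code lengths
// 2,1,3,3 the canonical assignment built above is B=0, A=10, C=110, D=111;
// codes of each length are consecutive and ordered by symbol, which is what
// lets firstcode/firstsymbol/maxcode describe the whole table compactly.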
  4035. // zlib-from-memory implementation for PNG reading
  4036. // because PNG allows splitting the zlib stream arbitrarily,
  4037. // and it's annoying structurally to have PNG call ZLIB call PNG,
  4038. // we require PNG read all the IDATs and combine them into a single
  4039. // memory buffer
  4040. typedef struct {
  4041. stbi_uc *zbuffer, *zbuffer_end;
  4042. int num_bits;
  4043. stbi__uint32 code_buffer;
  4044. char* zout;
  4045. char* zout_start;
  4046. char* zout_end;
  4047. int z_expandable;
  4048. stbi__zhuffman z_length, z_distance;
  4049. } stbi__zbuf;
  4050. stbi_inline static int stbi__zeof(stbi__zbuf* z) {
  4051. return (z->zbuffer >= z->zbuffer_end);
  4052. }
  4053. stbi_inline static stbi_uc stbi__zget8(stbi__zbuf* z) {
  4054. return stbi__zeof(z) ? 0 : *z->zbuffer++;
  4055. }
  4056. static void stbi__fill_bits(stbi__zbuf* z) {
  4057. do {
  4058. if (z->code_buffer >= (1U << z->num_bits)) {
  4059. z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */
  4060. return;
  4061. }
  4062. z->code_buffer |= (unsigned int)stbi__zget8(z) << z->num_bits;
  4063. z->num_bits += 8;
  4064. } while (z->num_bits <= 24);
  4065. }
  4066. stbi_inline static unsigned int stbi__zreceive(stbi__zbuf* z, int n) {
  4067. unsigned int k;
  4068. if (z->num_bits < n)
  4069. stbi__fill_bits(z);
  4070. k = z->code_buffer & ((1 << n) - 1);
  4071. z->code_buffer >>= n;
  4072. z->num_bits -= n;
  4073. return k;
  4074. }
  4075. static int stbi__zhuffman_decode_slowpath(stbi__zbuf* a, stbi__zhuffman* z) {
  4076. int b, s, k;
  4077. // not resolved by fast table, so compute it the slow way
  4078. // use jpeg approach, which requires MSbits at top
  4079. k = stbi__bit_reverse(a->code_buffer, 16);
  4080. for (s = STBI__ZFAST_BITS + 1;; ++s)
  4081. if (k < z->maxcode[s])
  4082. break;
  4083. if (s >= 16)
  4084. return -1; // invalid code!
  4085. // code size is s, so:
  4086. b = (k >> (16 - s)) - z->firstcode[s] + z->firstsymbol[s];
  4087. if (b >= STBI__ZNSYMS)
  4088. return -1; // some data was corrupt somewhere!
  4089. if (z->size[b] != s)
  4090. return -1; // was originally an assert, but report failure instead.
  4091. a->code_buffer >>= s;
  4092. a->num_bits -= s;
  4093. return z->value[b];
  4094. }
  4095. stbi_inline static int stbi__zhuffman_decode(stbi__zbuf* a, stbi__zhuffman* z) {
  4096. int b, s;
  4097. if (a->num_bits < 16) {
  4098. if (stbi__zeof(a)) {
  4099. return -1; /* report error for unexpected end of data. */
  4100. }
  4101. stbi__fill_bits(a);
  4102. }
  4103. b = z->fast[a->code_buffer & STBI__ZFAST_MASK];
  4104. if (b) {
  4105. s = b >> 9;
  4106. a->code_buffer >>= s;
  4107. a->num_bits -= s;
  4108. return b & 511;
  4109. }
  4110. return stbi__zhuffman_decode_slowpath(a, z);
  4111. }
  4112. static int stbi__zexpand(
  4113. stbi__zbuf* z, char* zout, int n) // need to make room for n bytes
  4114. {
  4115. char* q;
  4116. unsigned int cur, limit, old_limit;
  4117. z->zout = zout;
  4118. if (!z->z_expandable)
  4119. return stbi__err("output buffer limit", "Corrupt PNG");
  4120. cur = (unsigned int)(z->zout - z->zout_start);
  4121. limit = old_limit = (unsigned)(z->zout_end - z->zout_start);
  4122. if (UINT_MAX - cur < (unsigned)n)
  4123. return stbi__err("outofmem", "Out of memory");
  4124. while (cur + n > limit) {
  4125. if (limit > UINT_MAX / 2)
  4126. return stbi__err("outofmem", "Out of memory");
  4127. limit *= 2;
  4128. }
  4129. q = (char*)STBI_REALLOC_SIZED(z->zout_start, old_limit, limit);
  4130. STBI_NOTUSED(old_limit);
  4131. if (q == NULL)
  4132. return stbi__err("outofmem", "Out of memory");
  4133. z->zout_start = q;
  4134. z->zout = q + cur;
  4135. z->zout_end = q + limit;
  4136. return 1;
  4137. }
  4138. static const int stbi__zlength_base[31] = {
  4139. 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
  4140. 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
  4141. static const int stbi__zlength_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
  4142. 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
  4143. 4, 4, 5, 5, 5, 5, 0, 0, 0};
  4144. static const int stbi__zdist_base[32] = {
  4145. 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
  4146. 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
  4147. 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
  4148. static const int stbi__zdist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
  4149. 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
  4150. 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
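// example of how these tables are used: length symbol 265 decodes as
// stbi__zlength_base[265-257] == 11 plus stbi__zlength_extra[8] == 1 extra bit,
// covering lengths 11..12; distance symbol 4 decodes as base 5 plus 1 extra
// bit, covering distances 5..6 (both per the DEFLATE spec, RFC 1951 3.2.5).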
  4151. static int stbi__parse_huffman_block(stbi__zbuf* a) {
  4152. char* zout = a->zout;
  4153. for (;;) {
  4154. int z = stbi__zhuffman_decode(a, &a->z_length);
  4155. if (z < 256) {
  4156. if (z < 0)
  4157. return stbi__err(
  4158. "bad huffman code", "Corrupt PNG"); // error in huffman codes
  4159. if (zout >= a->zout_end) {
  4160. if (!stbi__zexpand(a, zout, 1))
  4161. return 0;
  4162. zout = a->zout;
  4163. }
  4164. *zout++ = (char)z;
  4165. } else {
  4166. stbi_uc* p;
  4167. int len, dist;
  4168. if (z == 256) {
  4169. a->zout = zout;
  4170. return 1;
  4171. }
  4172. z -= 257;
  4173. len = stbi__zlength_base[z];
  4174. if (stbi__zlength_extra[z])
  4175. len += stbi__zreceive(a, stbi__zlength_extra[z]);
  4176. z = stbi__zhuffman_decode(a, &a->z_distance);
  4177. if (z < 0)
  4178. return stbi__err("bad huffman code", "Corrupt PNG");
  4179. dist = stbi__zdist_base[z];
  4180. if (stbi__zdist_extra[z])
  4181. dist += stbi__zreceive(a, stbi__zdist_extra[z]);
  4182. if (zout - a->zout_start < dist)
  4183. return stbi__err("bad dist", "Corrupt PNG");
  4184. if (zout + len > a->zout_end) {
  4185. if (!stbi__zexpand(a, zout, len))
  4186. return 0;
  4187. zout = a->zout;
  4188. }
  4189. p = (stbi_uc*)(zout - dist);
  4190. if (dist == 1) { // run of one byte; common in images.
  4191. stbi_uc v = *p;
  4192. if (len) {
  4193. do
  4194. *zout++ = v;
  4195. while (--len);
  4196. }
  4197. } else {
  4198. if (len) {
  4199. do
  4200. *zout++ = *p++;
  4201. while (--len);
  4202. }
  4203. }
  4204. }
  4205. }
  4206. }
  4207. static int stbi__compute_huffman_codes(stbi__zbuf* a) {
  4208. static const stbi_uc length_dezigzag[19] = {16, 17, 18, 0, 8, 7, 9, 6, 10, 5,
  4209. 11, 4, 12, 3, 13, 2, 14, 1, 15};
  4210. stbi__zhuffman z_codelength;
  4211. stbi_uc lencodes[286 + 32 + 137]; // padding for maximum single op
  4212. stbi_uc codelength_sizes[19];
  4213. int i, n;
  4214. int hlit = stbi__zreceive(a, 5) + 257;
  4215. int hdist = stbi__zreceive(a, 5) + 1;
  4216. int hclen = stbi__zreceive(a, 4) + 4;
  4217. int ntot = hlit + hdist;
  4218. memset(codelength_sizes, 0, sizeof(codelength_sizes));
  4219. for (i = 0; i < hclen; ++i) {
  4220. int s = stbi__zreceive(a, 3);
  4221. codelength_sizes[length_dezigzag[i]] = (stbi_uc)s;
  4222. }
  4223. if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19))
  4224. return 0;
  4225. n = 0;
  4226. while (n < ntot) {
  4227. int c = stbi__zhuffman_decode(a, &z_codelength);
  4228. if (c < 0 || c >= 19)
  4229. return stbi__err("bad codelengths", "Corrupt PNG");
  4230. if (c < 16)
  4231. lencodes[n++] = (stbi_uc)c;
  4232. else {
  4233. stbi_uc fill = 0;
  4234. if (c == 16) {
  4235. c = stbi__zreceive(a, 2) + 3;
  4236. if (n == 0)
  4237. return stbi__err("bad codelengths", "Corrupt PNG");
  4238. fill = lencodes[n - 1];
  4239. } else if (c == 17) {
  4240. c = stbi__zreceive(a, 3) + 3;
  4241. } else if (c == 18) {
  4242. c = stbi__zreceive(a, 7) + 11;
  4243. } else {
  4244. return stbi__err("bad codelengths", "Corrupt PNG");
  4245. }
  4246. if (ntot - n < c)
  4247. return stbi__err("bad codelengths", "Corrupt PNG");
  4248. memset(lencodes + n, fill, c);
  4249. n += c;
  4250. }
  4251. }
  4252. if (n != ntot)
  4253. return stbi__err("bad codelengths", "Corrupt PNG");
  4254. if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit))
  4255. return 0;
  4256. if (!stbi__zbuild_huffman(&a->z_distance, lencodes + hlit, hdist))
  4257. return 0;
  4258. return 1;
  4259. }
  4260. static int stbi__parse_uncompressed_block(stbi__zbuf* a) {
  4261. stbi_uc header[4];
  4262. int len, nlen, k;
  4263. if (a->num_bits & 7)
  4264. stbi__zreceive(a, a->num_bits & 7); // discard
  4265. // drain the bit-packed data into header
  4266. k = 0;
  4267. while (a->num_bits > 0) {
  4268. header[k++] = (stbi_uc)(a->code_buffer & 255); // suppress MSVC run-time check
  4269. a->code_buffer >>= 8;
  4270. a->num_bits -= 8;
  4271. }
  4272. if (a->num_bits < 0)
  4273. return stbi__err("zlib corrupt", "Corrupt PNG");
  4274. // now fill header the normal way
  4275. while (k < 4)
  4276. header[k++] = stbi__zget8(a);
  4277. len = header[1] * 256 + header[0];
  4278. nlen = header[3] * 256 + header[2];
  4279. if (nlen != (len ^ 0xffff))
  4280. return stbi__err("zlib corrupt", "Corrupt PNG");
  4281. if (a->zbuffer + len > a->zbuffer_end)
  4282. return stbi__err("read past buffer", "Corrupt PNG");
  4283. if (a->zout + len > a->zout_end)
  4284. if (!stbi__zexpand(a, a->zout, len))
  4285. return 0;
  4286. memcpy(a->zout, a->zbuffer, len);
  4287. a->zbuffer += len;
  4288. a->zout += len;
  4289. return 1;
  4290. }
  4291. static int stbi__parse_zlib_header(stbi__zbuf* a) {
  4292. int cmf = stbi__zget8(a);
  4293. int cm = cmf & 15;
  4294. /* int cinfo = cmf >> 4; */
  4295. int flg = stbi__zget8(a);
  4296. if (stbi__zeof(a))
  4297. return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec
  4298. if ((cmf * 256 + flg) % 31 != 0)
  4299. return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec
  4300. if (flg & 32)
  4301. return stbi__err(
  4302. "no preset dict",
  4303. "Corrupt PNG"); // preset dictionary not allowed in png
  4304. if (cm != 8)
  4305. return stbi__err("bad compression", "Corrupt PNG"); // DEFLATE required for png
  4306. // window = 1 << (8 + cinfo)... but who cares, we fully buffer output
  4307. return 1;
  4308. }
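// note: the "% 31" test is the zlib FCHECK rule: the low 5 bits of FLG are
// chosen by the encoder so that CMF*256 + FLG is a multiple of 31. e.g. the
// common header bytes 0x78 0x9C give 0x78*256 + 0x9C == 30876 == 31 * 996.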
  4309. static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = {
  4310. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  4311. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  4312. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  4313. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  4314. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  4315. 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  4316. 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  4317. 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  4318. 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  4319. 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  4320. 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8};
  4321. static const stbi_uc stbi__zdefault_distance[32] = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  4322. 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  4323. 5, 5, 5, 5, 5, 5, 5, 5, 5, 5};
  4324. /*
  4325. Init algorithm:
  4326. {
  4327. int i; // use <= to match clearly with spec
  4328. for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8;
  4329. for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9;
  4330. for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7;
  4331. for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8;
  4332. for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5;
  4333. }
  4334. */
  4335. static int stbi__parse_zlib(stbi__zbuf* a, int parse_header) {
  4336. int final, type;
  4337. if (parse_header)
  4338. if (!stbi__parse_zlib_header(a))
  4339. return 0;
  4340. a->num_bits = 0;
  4341. a->code_buffer = 0;
  4342. do {
  4343. final = stbi__zreceive(a, 1);
  4344. type = stbi__zreceive(a, 2);
  4345. if (type == 0) {
  4346. if (!stbi__parse_uncompressed_block(a))
  4347. return 0;
  4348. } else if (type == 3) {
  4349. return 0;
  4350. } else {
  4351. if (type == 1) {
  4352. // use fixed code lengths
  4353. if (!stbi__zbuild_huffman(
  4354. &a->z_length, stbi__zdefault_length, STBI__ZNSYMS))
  4355. return 0;
  4356. if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32))
  4357. return 0;
  4358. } else {
  4359. if (!stbi__compute_huffman_codes(a))
  4360. return 0;
  4361. }
  4362. if (!stbi__parse_huffman_block(a))
  4363. return 0;
  4364. }
  4365. } while (!final);
  4366. return 1;
  4367. }
  4368. static int stbi__do_zlib(
  4369. stbi__zbuf* a, char* obuf, int olen, int exp, int parse_header) {
  4370. a->zout_start = obuf;
  4371. a->zout = obuf;
  4372. a->zout_end = obuf + olen;
  4373. a->z_expandable = exp;
  4374. return stbi__parse_zlib(a, parse_header);
  4375. }
  4376. STBIDEF char* stbi_zlib_decode_malloc_guesssize(
  4377. const char* buffer, int len, int initial_size, int* outlen) {
  4378. stbi__zbuf a;
  4379. char* p = (char*)stbi__malloc(initial_size);
  4380. if (p == NULL)
  4381. return NULL;
  4382. a.zbuffer = (stbi_uc*)buffer;
  4383. a.zbuffer_end = (stbi_uc*)buffer + len;
  4384. if (stbi__do_zlib(&a, p, initial_size, 1, 1)) {
  4385. if (outlen)
  4386. *outlen = (int)(a.zout - a.zout_start);
  4387. return a.zout_start;
  4388. } else {
  4389. STBI_FREE(a.zout_start);
  4390. return NULL;
  4391. }
  4392. }
  4393. STBIDEF char* stbi_zlib_decode_malloc(char const* buffer, int len, int* outlen) {
  4394. return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen);
  4395. }
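// illustrative use of the malloc-style decoder above (a sketch, not part of
// the library), where zbuf/zlen are the caller's raw zlib stream:
//    int n;
//    char *data = stbi_zlib_decode_malloc(zbuf, zlen, &n);
//    if (data) { /* use n bytes at data */ STBI_FREE(data); }
// the buffer comes from stbi__malloc, so release it with STBI_FREE
// (plain free() unless STBI_MALLOC/STBI_FREE were overridden).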
  4396. STBIDEF char* stbi_zlib_decode_malloc_guesssize_headerflag(
  4397. const char* buffer, int len, int initial_size, int* outlen, int parse_header) {
  4398. stbi__zbuf a;
  4399. char* p = (char*)stbi__malloc(initial_size);
  4400. if (p == NULL)
  4401. return NULL;
  4402. a.zbuffer = (stbi_uc*)buffer;
  4403. a.zbuffer_end = (stbi_uc*)buffer + len;
  4404. if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) {
  4405. if (outlen)
  4406. *outlen = (int)(a.zout - a.zout_start);
  4407. return a.zout_start;
  4408. } else {
  4409. STBI_FREE(a.zout_start);
  4410. return NULL;
  4411. }
  4412. }
  4413. STBIDEF int stbi_zlib_decode_buffer(
  4414. char* obuffer, int olen, char const* ibuffer, int ilen) {
  4415. stbi__zbuf a;
  4416. a.zbuffer = (stbi_uc*)ibuffer;
  4417. a.zbuffer_end = (stbi_uc*)ibuffer + ilen;
  4418. if (stbi__do_zlib(&a, obuffer, olen, 0, 1))
  4419. return (int)(a.zout - a.zout_start);
  4420. else
  4421. return -1;
  4422. }
  4423. STBIDEF char* stbi_zlib_decode_noheader_malloc(
  4424. char const* buffer, int len, int* outlen) {
  4425. stbi__zbuf a;
  4426. char* p = (char*)stbi__malloc(16384);
  4427. if (p == NULL)
  4428. return NULL;
  4429. a.zbuffer = (stbi_uc*)buffer;
  4430. a.zbuffer_end = (stbi_uc*)buffer + len;
  4431. if (stbi__do_zlib(&a, p, 16384, 1, 0)) {
  4432. if (outlen)
  4433. *outlen = (int)(a.zout - a.zout_start);
  4434. return a.zout_start;
  4435. } else {
  4436. STBI_FREE(a.zout_start);
  4437. return NULL;
  4438. }
  4439. }
  4440. STBIDEF int stbi_zlib_decode_noheader_buffer(
  4441. char* obuffer, int olen, const char* ibuffer, int ilen) {
  4442. stbi__zbuf a;
  4443. a.zbuffer = (stbi_uc*)ibuffer;
  4444. a.zbuffer_end = (stbi_uc*)ibuffer + ilen;
  4445. if (stbi__do_zlib(&a, obuffer, olen, 0, 0))
  4446. return (int)(a.zout - a.zout_start);
  4447. else
  4448. return -1;
  4449. }
  4450. #endif
  4451. // public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18
  4452. // simple implementation
  4453. // - only 8-bit samples
  4454. // - no CRC checking
  4455. // - allocates lots of intermediate memory
  4456. // - avoids problem of streaming data between subsystems
  4457. // - avoids explicit window management
  4458. // performance
  4459. // - uses stb_zlib, a PD zlib implementation with fast huffman decoding
  4460. #ifndef STBI_NO_PNG
  4461. typedef struct {
  4462. stbi__uint32 length;
  4463. stbi__uint32 type;
  4464. } stbi__pngchunk;
  4465. static stbi__pngchunk stbi__get_chunk_header(stbi__context* s) {
  4466. stbi__pngchunk c;
  4467. c.length = stbi__get32be(s);
  4468. c.type = stbi__get32be(s);
  4469. return c;
  4470. }
  4471. static int stbi__check_png_header(stbi__context* s) {
  4472. static const stbi_uc png_sig[8] = {137, 80, 78, 71, 13, 10, 26, 10};
  4473. int i;
  4474. for (i = 0; i < 8; ++i)
  4475. if (stbi__get8(s) != png_sig[i])
  4476. return stbi__err("bad png sig", "Not a PNG");
  4477. return 1;
  4478. }
  4479. typedef struct {
  4480. stbi__context* s;
  4481. stbi_uc *idata, *expanded, *out;
  4482. int depth;
  4483. } stbi__png;
  4484. enum {
  4485. STBI__F_none = 0,
  4486. STBI__F_sub = 1,
  4487. STBI__F_up = 2,
  4488. STBI__F_avg = 3,
  4489. STBI__F_paeth = 4,
  4490. // synthetic filters used for first scanline to avoid needing a dummy row of 0s
  4491. STBI__F_avg_first,
  4492. STBI__F_paeth_first
  4493. };
  4494. static stbi_uc first_row_filter[5] = {
  4495. STBI__F_none, STBI__F_sub, STBI__F_none, STBI__F_avg_first,
  4496. STBI__F_paeth_first};
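// note: on the first scanline there is no prior row, so "up" degenerates to
// "none", "avg" degenerates to half of the left pixel, and "paeth" degenerates
// to "sub" (predict from the left only); that is exactly what the two synthetic
// filters implement in the switch statements below.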
  4497. static int stbi__paeth(int a, int b, int c) {
  4498. int p = a + b - c;
  4499. int pa = abs(p - a);
  4500. int pb = abs(p - b);
  4501. int pc = abs(p - c);
  4502. if (pa <= pb && pa <= pc)
  4503. return a;
  4504. if (pb <= pc)
  4505. return b;
  4506. return c;
  4507. }
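// note: this is the PNG Paeth predictor (filter type 4): p = a + b - c is the
// value the pixel would have if the gradient were linear, and we predict with
// whichever of left (a), above (b) or upper-left (c) is closest to p, breaking
// ties in the order a, b, c as the spec requires.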
  4508. static const stbi_uc stbi__depth_scale_table[9] = {0, 0xff, 0x55, 0, 0x11,
  4509. 0, 0, 0, 0x01};
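// note: the nonzero entries replicate a 1/2/4-bit sample across the full byte,
// e.g. a 1-bit sample of 1 becomes 1*0xff = 255, a 2-bit sample of 3 becomes
// 3*0x55 = 255, and a 4-bit sample of 15 becomes 15*0x11 = 255, so full-scale
// stays full-scale after promotion to 8 bits.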
  4510. // create the png data from post-deflated data
  4511. static int stbi__create_png_image_raw(
  4512. stbi__png* a, stbi_uc* raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x,
  4513. stbi__uint32 y, int depth, int color) {
  4514. int bytes = (depth == 16 ? 2 : 1);
  4515. stbi__context* s = a->s;
  4516. stbi__uint32 i, j, stride = x * out_n * bytes;
  4517. stbi__uint32 img_len, img_width_bytes;
  4518. int k;
  4519. int img_n = s->img_n; // copy it into a local for later
  4520. int output_bytes = out_n * bytes;
  4521. int filter_bytes = img_n * bytes;
  4522. int width = x;
  4523. STBI_ASSERT(out_n == s->img_n || out_n == s->img_n + 1);
  4524. a->out = (stbi_uc*)stbi__malloc_mad3(
  4525. x, y, output_bytes, 0); // extra bytes to write off the end into
  4526. if (!a->out)
  4527. return stbi__err("outofmem", "Out of memory");
  4528. if (!stbi__mad3sizes_valid(img_n, x, depth, 7))
  4529. return stbi__err("too large", "Corrupt PNG");
  4530. img_width_bytes = (((img_n * x * depth) + 7) >> 3);
  4531. img_len = (img_width_bytes + 1) * y;
  4532. // we used to check for exact match between raw_len and img_len on non-interlaced
  4533. // PNGs, but issue #276 reported a PNG in the wild that had extra data at the end
  4534. // (all zeros), so just check for raw_len < img_len always.
  4535. if (raw_len < img_len)
  4536. return stbi__err("not enough pixels", "Corrupt PNG");
  4537. for (j = 0; j < y; ++j) {
  4538. stbi_uc* cur = a->out + stride * j;
  4539. stbi_uc* prior;
  4540. int filter = *raw++;
  4541. if (filter > 4)
  4542. return stbi__err("invalid filter", "Corrupt PNG");
  4543. if (depth < 8) {
  4544. if (img_width_bytes > x)
  4545. return stbi__err("invalid width", "Corrupt PNG");
  4546. cur += x * out_n -
  4547. img_width_bytes; // store output to the rightmost img_len bytes, so
  4548. // we can decode in place
  4549. filter_bytes = 1;
  4550. width = img_width_bytes;
  4551. }
  4552. prior = cur -
  4553. stride; // bugfix: need to compute this after 'cur +=' computation above
  4554. // if first row, use special filter that doesn't sample previous row
  4555. if (j == 0)
  4556. filter = first_row_filter[filter];
  4557. // handle first byte explicitly
  4558. for (k = 0; k < filter_bytes; ++k) {
  4559. switch (filter) {
  4560. case STBI__F_none:
  4561. cur[k] = raw[k];
  4562. break;
  4563. case STBI__F_sub:
  4564. cur[k] = raw[k];
  4565. break;
  4566. case STBI__F_up:
  4567. cur[k] = STBI__BYTECAST(raw[k] + prior[k]);
  4568. break;
  4569. case STBI__F_avg:
  4570. cur[k] = STBI__BYTECAST(raw[k] + (prior[k] >> 1));
  4571. break;
  4572. case STBI__F_paeth:
  4573. cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0, prior[k], 0));
  4574. break;
  4575. case STBI__F_avg_first:
  4576. cur[k] = raw[k];
  4577. break;
  4578. case STBI__F_paeth_first:
  4579. cur[k] = raw[k];
  4580. break;
  4581. }
  4582. }
  4583. if (depth == 8) {
  4584. if (img_n != out_n)
  4585. cur[img_n] = 255; // first pixel
  4586. raw += img_n;
  4587. cur += out_n;
  4588. prior += out_n;
  4589. } else if (depth == 16) {
  4590. if (img_n != out_n) {
  4591. cur[filter_bytes] = 255; // first pixel top byte
  4592. cur[filter_bytes + 1] = 255; // first pixel bottom byte
  4593. }
  4594. raw += filter_bytes;
  4595. cur += output_bytes;
  4596. prior += output_bytes;
  4597. } else {
  4598. raw += 1;
  4599. cur += 1;
  4600. prior += 1;
  4601. }
  4602. // this is a little gross, so that we don't switch per-pixel or per-component
  4603. if (depth < 8 || img_n == out_n) {
  4604. int nk = (width - 1) * filter_bytes;
  4605. #define STBI__CASE(f) \
  4606. case f: \
  4607. for (k = 0; k < nk; ++k)
  4608. switch (filter) {
  4609. // "none" filter turns into a memcpy here; make that explicit.
  4610. case STBI__F_none:
  4611. memcpy(cur, raw, nk);
  4612. break;
  4613. STBI__CASE(STBI__F_sub) {
  4614. cur[k] = STBI__BYTECAST(raw[k] + cur[k - filter_bytes]);
  4615. }
  4616. break;
  4617. STBI__CASE(STBI__F_up) {
  4618. cur[k] = STBI__BYTECAST(raw[k] + prior[k]);
  4619. }
  4620. break;
  4621. STBI__CASE(STBI__F_avg) {
  4622. cur[k] = STBI__BYTECAST(
  4623. raw[k] + ((prior[k] + cur[k - filter_bytes]) >> 1));
  4624. }
  4625. break;
  4626. STBI__CASE(STBI__F_paeth) {
  4627. cur[k] = STBI__BYTECAST(
  4628. raw[k] + stbi__paeth(
  4629. cur[k - filter_bytes], prior[k],
  4630. prior[k - filter_bytes]));
  4631. }
  4632. break;
  4633. STBI__CASE(STBI__F_avg_first) {
  4634. cur[k] = STBI__BYTECAST(raw[k] + (cur[k - filter_bytes] >> 1));
  4635. }
  4636. break;
  4637. STBI__CASE(STBI__F_paeth_first) {
  4638. cur[k] = STBI__BYTECAST(
  4639. raw[k] + stbi__paeth(cur[k - filter_bytes], 0, 0));
  4640. }
  4641. break;
  4642. }
  4643. #undef STBI__CASE
  4644. raw += nk;
  4645. } else {
  4646. STBI_ASSERT(img_n + 1 == out_n);
  4647. #define STBI__CASE(f) \
  4648. case f: \
  4649. for (i = x - 1; i >= 1; --i, cur[filter_bytes] = 255, raw += filter_bytes, \
  4650. cur += output_bytes, prior += output_bytes) \
  4651. for (k = 0; k < filter_bytes; ++k)
  4652. switch (filter) {
  4653. STBI__CASE(STBI__F_none) { cur[k] = raw[k]; }
  4654. break;
  4655. STBI__CASE(STBI__F_sub) {
  4656. cur[k] = STBI__BYTECAST(raw[k] + cur[k - output_bytes]);
  4657. }
  4658. break;
  4659. STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); }
  4660. break;
  4661. STBI__CASE(STBI__F_avg) {
  4662. cur[k] = STBI__BYTECAST(
  4663. raw[k] + ((prior[k] + cur[k - output_bytes]) >> 1));
  4664. }
  4665. break;
  4666. STBI__CASE(STBI__F_paeth) {
  4667. cur[k] = STBI__BYTECAST(
  4668. raw[k] + stbi__paeth(
  4669. cur[k - output_bytes], prior[k],
  4670. prior[k - output_bytes]));
  4671. }
  4672. break;
  4673. STBI__CASE(STBI__F_avg_first) {
  4674. cur[k] = STBI__BYTECAST(raw[k] + (cur[k - output_bytes] >> 1));
  4675. }
  4676. break;
  4677. STBI__CASE(STBI__F_paeth_first) {
  4678. cur[k] = STBI__BYTECAST(
  4679. raw[k] + stbi__paeth(cur[k - output_bytes], 0, 0));
  4680. }
  4681. break;
  4682. }
  4683. #undef STBI__CASE
  4684. // the loop above sets the high byte of the pixels' alpha, but for
  4685. // 16 bit png files we also need the low byte set. we'll do that here.
  4686. if (depth == 16) {
  4687. cur = a->out + stride * j; // start at the beginning of the row again
  4688. for (i = 0; i < x; ++i, cur += output_bytes) {
  4689. cur[filter_bytes + 1] = 255;
  4690. }
  4691. }
  4692. }
  4693. }
  4694. // we make a separate pass to expand bits to pixels; for performance,
  4695. // this could run two scanlines behind the above code, so it won't
4696. // interfere with filtering but will still be in the cache.
  4697. if (depth < 8) {
  4698. for (j = 0; j < y; ++j) {
  4699. stbi_uc* cur = a->out + stride * j;
  4700. stbi_uc* in = a->out + stride * j + x * out_n - img_width_bytes;
4701. // unpack 1/2/4-bit into an 8-bit buffer. this keeps the common 8-bit
4702. // path optimal at minimal cost for 1/2/4-bit PNGs. scanlines are byte
4703. // aligned, so if width is not a multiple of 8/4/2 we'll decode dummy
4704. // trailing samples that will be skipped in the later loop
  4705. stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth]
  4706. : 1; // scale grayscale values to 0..255 range
  4707. // note that the final byte might overshoot and write more data than
  4708. // desired. we can allocate enough data that this never writes out of
  4709. // memory, but it could also overwrite the next scanline. can it overwrite
  4710. // non-empty data on the next scanline? yes, consider 1-pixel-wide scanlines
  4711. // with 1-bit-per-pixel. so we need to explicitly clamp the final ones
  4712. if (depth == 4) {
  4713. for (k = x * img_n; k >= 2; k -= 2, ++in) {
  4714. *cur++ = scale * ((*in >> 4));
  4715. *cur++ = scale * ((*in) & 0x0f);
  4716. }
  4717. if (k > 0)
  4718. *cur++ = scale * ((*in >> 4));
  4719. } else if (depth == 2) {
  4720. for (k = x * img_n; k >= 4; k -= 4, ++in) {
  4721. *cur++ = scale * ((*in >> 6));
  4722. *cur++ = scale * ((*in >> 4) & 0x03);
  4723. *cur++ = scale * ((*in >> 2) & 0x03);
  4724. *cur++ = scale * ((*in) & 0x03);
  4725. }
  4726. if (k > 0)
  4727. *cur++ = scale * ((*in >> 6));
  4728. if (k > 1)
  4729. *cur++ = scale * ((*in >> 4) & 0x03);
  4730. if (k > 2)
  4731. *cur++ = scale * ((*in >> 2) & 0x03);
  4732. } else if (depth == 1) {
  4733. for (k = x * img_n; k >= 8; k -= 8, ++in) {
  4734. *cur++ = scale * ((*in >> 7));
  4735. *cur++ = scale * ((*in >> 6) & 0x01);
  4736. *cur++ = scale * ((*in >> 5) & 0x01);
  4737. *cur++ = scale * ((*in >> 4) & 0x01);
  4738. *cur++ = scale * ((*in >> 3) & 0x01);
  4739. *cur++ = scale * ((*in >> 2) & 0x01);
  4740. *cur++ = scale * ((*in >> 1) & 0x01);
  4741. *cur++ = scale * ((*in) & 0x01);
  4742. }
  4743. if (k > 0)
  4744. *cur++ = scale * ((*in >> 7));
  4745. if (k > 1)
  4746. *cur++ = scale * ((*in >> 6) & 0x01);
  4747. if (k > 2)
  4748. *cur++ = scale * ((*in >> 5) & 0x01);
  4749. if (k > 3)
  4750. *cur++ = scale * ((*in >> 4) & 0x01);
  4751. if (k > 4)
  4752. *cur++ = scale * ((*in >> 3) & 0x01);
  4753. if (k > 5)
  4754. *cur++ = scale * ((*in >> 2) & 0x01);
  4755. if (k > 6)
  4756. *cur++ = scale * ((*in >> 1) & 0x01);
  4757. }
  4758. if (img_n != out_n) {
  4759. int q;
  4760. // insert alpha = 255
  4761. cur = a->out + stride * j;
  4762. if (img_n == 1) {
  4763. for (q = x - 1; q >= 0; --q) {
  4764. cur[q * 2 + 1] = 255;
  4765. cur[q * 2 + 0] = cur[q];
  4766. }
  4767. } else {
  4768. STBI_ASSERT(img_n == 3);
  4769. for (q = x - 1; q >= 0; --q) {
  4770. cur[q * 4 + 3] = 255;
  4771. cur[q * 4 + 2] = cur[q * 3 + 2];
  4772. cur[q * 4 + 1] = cur[q * 3 + 1];
  4773. cur[q * 4 + 0] = cur[q * 3 + 0];
  4774. }
  4775. }
  4776. }
  4777. }
  4778. } else if (depth == 16) {
  4779. // force the image data from big-endian to platform-native.
  4780. // this is done in a separate pass due to the decoding relying
  4781. // on the data being untouched, but could probably be done
  4782. // per-line during decode if care is taken.
  4783. stbi_uc* cur = a->out;
  4784. stbi__uint16* cur16 = (stbi__uint16*)cur;
  4785. for (i = 0; i < x * y * out_n; ++i, cur16++, cur += 2) {
  4786. *cur16 = (cur[0] << 8) | cur[1];
  4787. }
  4788. }
  4789. return 1;
  4790. }
  4791. static int stbi__create_png_image(
  4792. stbi__png* a, stbi_uc* image_data, stbi__uint32 image_data_len, int out_n,
  4793. int depth, int color, int interlaced) {
  4794. int bytes = (depth == 16 ? 2 : 1);
  4795. int out_bytes = out_n * bytes;
  4796. stbi_uc* final;
  4797. int p;
  4798. if (!interlaced)
  4799. return stbi__create_png_image_raw(
  4800. a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth,
  4801. color);
  4802. // de-interlacing
  4803. final = (stbi_uc*)stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0);
  4804. if (!final)
  4805. return stbi__err("outofmem", "Out of memory");
  4806. for (p = 0; p < 7; ++p) {
  4807. int xorig[] = {0, 4, 0, 2, 0, 1, 0};
  4808. int yorig[] = {0, 0, 4, 0, 2, 0, 1};
  4809. int xspc[] = {8, 8, 4, 4, 2, 2, 1};
  4810. int yspc[] = {8, 8, 8, 4, 4, 2, 2};
  4811. int i, j, x, y;
  4812. // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1
  4813. x = (a->s->img_x - xorig[p] + xspc[p] - 1) / xspc[p];
  4814. y = (a->s->img_y - yorig[p] + yspc[p] - 1) / yspc[p];
  4815. if (x && y) {
  4816. stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y;
  4817. if (!stbi__create_png_image_raw(
  4818. a, image_data, image_data_len, out_n, x, y, depth, color)) {
  4819. STBI_FREE(final);
  4820. return 0;
  4821. }
  4822. for (j = 0; j < y; ++j) {
  4823. for (i = 0; i < x; ++i) {
  4824. int out_y = j * yspc[p] + yorig[p];
  4825. int out_x = i * xspc[p] + xorig[p];
  4826. memcpy(final + out_y * a->s->img_x * out_bytes + out_x * out_bytes,
  4827. a->out + (j * x + i) * out_bytes, out_bytes);
  4828. }
  4829. }
  4830. STBI_FREE(a->out);
  4831. image_data += img_len;
  4832. image_data_len -= img_len;
  4833. }
  4834. }
  4835. a->out = final;
  4836. return 1;
  4837. }
  4838. static int stbi__compute_transparency(stbi__png* z, stbi_uc tc[3], int out_n) {
  4839. stbi__context* s = z->s;
  4840. stbi__uint32 i, pixel_count = s->img_x * s->img_y;
  4841. stbi_uc* p = z->out;
  4842. // compute color-based transparency, assuming we've
  4843. // already got 255 as the alpha value in the output
  4844. STBI_ASSERT(out_n == 2 || out_n == 4);
  4845. if (out_n == 2) {
  4846. for (i = 0; i < pixel_count; ++i) {
  4847. p[1] = (p[0] == tc[0] ? 0 : 255);
  4848. p += 2;
  4849. }
  4850. } else {
  4851. for (i = 0; i < pixel_count; ++i) {
  4852. if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
  4853. p[3] = 0;
  4854. p += 4;
  4855. }
  4856. }
  4857. return 1;
  4858. }
  4859. static int stbi__compute_transparency16(stbi__png* z, stbi__uint16 tc[3], int out_n) {
  4860. stbi__context* s = z->s;
  4861. stbi__uint32 i, pixel_count = s->img_x * s->img_y;
  4862. stbi__uint16* p = (stbi__uint16*)z->out;
  4863. // compute color-based transparency, assuming we've
  4864. // already got 65535 as the alpha value in the output
  4865. STBI_ASSERT(out_n == 2 || out_n == 4);
  4866. if (out_n == 2) {
  4867. for (i = 0; i < pixel_count; ++i) {
  4868. p[1] = (p[0] == tc[0] ? 0 : 65535);
  4869. p += 2;
  4870. }
  4871. } else {
  4872. for (i = 0; i < pixel_count; ++i) {
  4873. if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
  4874. p[3] = 0;
  4875. p += 4;
  4876. }
  4877. }
  4878. return 1;
  4879. }
  4880. static int stbi__expand_png_palette(
  4881. stbi__png* a, stbi_uc* palette, int len, int pal_img_n) {
  4882. stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y;
  4883. stbi_uc *p, *temp_out, *orig = a->out;
  4884. p = (stbi_uc*)stbi__malloc_mad2(pixel_count, pal_img_n, 0);
  4885. if (p == NULL)
  4886. return stbi__err("outofmem", "Out of memory");
4887. // between here and free(out) below, exiting would leak
  4888. temp_out = p;
  4889. if (pal_img_n == 3) {
  4890. for (i = 0; i < pixel_count; ++i) {
  4891. int n = orig[i] * 4;
  4892. p[0] = palette[n];
  4893. p[1] = palette[n + 1];
  4894. p[2] = palette[n + 2];
  4895. p += 3;
  4896. }
  4897. } else {
  4898. for (i = 0; i < pixel_count; ++i) {
  4899. int n = orig[i] * 4;
  4900. p[0] = palette[n];
  4901. p[1] = palette[n + 1];
  4902. p[2] = palette[n + 2];
  4903. p[3] = palette[n + 3];
  4904. p += 4;
  4905. }
  4906. }
  4907. STBI_FREE(a->out);
  4908. a->out = temp_out;
  4909. STBI_NOTUSED(len);
  4910. return 1;
  4911. }
  4912. static int stbi__unpremultiply_on_load_global = 0;
  4913. static int stbi__de_iphone_flag_global = 0;
  4914. STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) {
  4915. stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply;
  4916. }
  4917. STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) {
  4918. stbi__de_iphone_flag_global = flag_true_if_should_convert;
  4919. }
  4920. #ifndef STBI_THREAD_LOCAL
  4921. #define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global
  4922. #define stbi__de_iphone_flag stbi__de_iphone_flag_global
  4923. #else
  4924. static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local,
  4925. stbi__unpremultiply_on_load_set;
  4926. static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set;
  4927. STBIDEF void stbi__unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) {
  4928. stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply;
  4929. stbi__unpremultiply_on_load_set = 1;
  4930. }
  4931. STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) {
  4932. stbi__de_iphone_flag_local = flag_true_if_should_convert;
  4933. stbi__de_iphone_flag_set = 1;
  4934. }
  4935. #define stbi__unpremultiply_on_load \
  4936. (stbi__unpremultiply_on_load_set ? stbi__unpremultiply_on_load_local \
  4937. : stbi__unpremultiply_on_load_global)
  4938. #define stbi__de_iphone_flag \
  4939. (stbi__de_iphone_flag_set ? stbi__de_iphone_flag_local \
  4940. : stbi__de_iphone_flag_global)
  4941. #endif // STBI_THREAD_LOCAL
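// Illustrative usage sketch (the filename "photo.png" is hypothetical; the
// calls are the public setters above plus the usual stbi_load entry point):
// set the flags once before loading so iPhone-optimized (CgBI) PNGs come back
// as straight-alpha, RGB-ordered pixels.
//
//    stbi_convert_iphone_png_to_rgb(1);
//    stbi_set_unpremultiply_on_load(1);
//    int w, h, n;
//    unsigned char *pixels = stbi_load("photo.png", &w, &h, &n, 4);
//
// When STBI_THREAD_LOCAL is defined, the *_thread variants above override the
// global flags for the calling thread only.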
  4942. static void stbi__de_iphone(stbi__png* z) {
  4943. stbi__context* s = z->s;
  4944. stbi__uint32 i, pixel_count = s->img_x * s->img_y;
  4945. stbi_uc* p = z->out;
  4946. if (s->img_out_n == 3) { // convert bgr to rgb
  4947. for (i = 0; i < pixel_count; ++i) {
  4948. stbi_uc t = p[0];
  4949. p[0] = p[2];
  4950. p[2] = t;
  4951. p += 3;
  4952. }
  4953. } else {
  4954. STBI_ASSERT(s->img_out_n == 4);
  4955. if (stbi__unpremultiply_on_load) {
  4956. // convert bgr to rgb and unpremultiply
  4957. for (i = 0; i < pixel_count; ++i) {
  4958. stbi_uc a = p[3];
  4959. stbi_uc t = p[0];
  4960. if (a) {
  4961. stbi_uc half = a / 2;
  4962. p[0] = (p[2] * 255 + half) / a;
  4963. p[1] = (p[1] * 255 + half) / a;
  4964. p[2] = (t * 255 + half) / a;
  4965. } else {
  4966. p[0] = p[2];
  4967. p[2] = t;
  4968. }
  4969. p += 4;
  4970. }
  4971. } else {
  4972. // convert bgr to rgb
  4973. for (i = 0; i < pixel_count; ++i) {
  4974. stbi_uc t = p[0];
  4975. p[0] = p[2];
  4976. p[2] = t;
  4977. p += 4;
  4978. }
  4979. }
  4980. }
  4981. }
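// Worked example of the rounded unpremultiply above (illustrative numbers):
// a premultiplied channel value of 60 with alpha 120 becomes
// (60*255 + 60) / 120 = 128, i.e. round(60 * 255 / 120); adding half the
// alpha before dividing gives round-to-nearest instead of truncation.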
  4982. #define STBI__PNG_TYPE(a, b, c, d) \
  4983. (((unsigned)(a) << 24) + ((unsigned)(b) << 16) + ((unsigned)(c) << 8) + \
  4984. (unsigned)(d))
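// For example, STBI__PNG_TYPE('I','H','D','R') packs to 0x49484452 and
// STBI__PNG_TYPE('C','g','B','I') to 0x43674249. Bit 29 of the packed value
// is the 0x20 ("lowercase") bit of the first character, which is why the
// default case below treats chunks with that bit clear as critical.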
  4985. static int stbi__parse_png_file(stbi__png* z, int scan, int req_comp) {
  4986. stbi_uc palette[1024], pal_img_n = 0;
  4987. stbi_uc has_trans = 0, tc[3] = {0};
  4988. stbi__uint16 tc16[3];
  4989. stbi__uint32 ioff = 0, idata_limit = 0, i, pal_len = 0;
  4990. int first = 1, k, interlace = 0, color = 0, is_iphone = 0;
  4991. stbi__context* s = z->s;
  4992. z->expanded = NULL;
  4993. z->idata = NULL;
  4994. z->out = NULL;
  4995. if (!stbi__check_png_header(s))
  4996. return 0;
  4997. if (scan == STBI__SCAN_type)
  4998. return 1;
  4999. for (;;) {
  5000. stbi__pngchunk c = stbi__get_chunk_header(s);
  5001. switch (c.type) {
  5002. case STBI__PNG_TYPE('C', 'g', 'B', 'I'):
  5003. is_iphone = 1;
  5004. stbi__skip(s, c.length);
  5005. break;
  5006. case STBI__PNG_TYPE('I', 'H', 'D', 'R'): {
  5007. int comp, filter;
  5008. if (!first)
  5009. return stbi__err("multiple IHDR", "Corrupt PNG");
  5010. first = 0;
  5011. if (c.length != 13)
  5012. return stbi__err("bad IHDR len", "Corrupt PNG");
  5013. s->img_x = stbi__get32be(s);
  5014. s->img_y = stbi__get32be(s);
  5015. if (s->img_y > STBI_MAX_DIMENSIONS)
  5016. return stbi__err("too large", "Very large image (corrupt?)");
  5017. if (s->img_x > STBI_MAX_DIMENSIONS)
  5018. return stbi__err("too large", "Very large image (corrupt?)");
  5019. z->depth = stbi__get8(s);
  5020. if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 &&
  5021. z->depth != 16)
  5022. return stbi__err(
  5023. "1/2/4/8/16-bit only",
  5024. "PNG not supported: 1/2/4/8/16-bit only");
  5025. color = stbi__get8(s);
  5026. if (color > 6)
  5027. return stbi__err("bad ctype", "Corrupt PNG");
  5028. if (color == 3 && z->depth == 16)
  5029. return stbi__err("bad ctype", "Corrupt PNG");
  5030. if (color == 3)
  5031. pal_img_n = 3;
  5032. else if (color & 1)
  5033. return stbi__err("bad ctype", "Corrupt PNG");
  5034. comp = stbi__get8(s);
  5035. if (comp)
  5036. return stbi__err("bad comp method", "Corrupt PNG");
  5037. filter = stbi__get8(s);
  5038. if (filter)
  5039. return stbi__err("bad filter method", "Corrupt PNG");
  5040. interlace = stbi__get8(s);
  5041. if (interlace > 1)
  5042. return stbi__err("bad interlace method", "Corrupt PNG");
  5043. if (!s->img_x || !s->img_y)
  5044. return stbi__err("0-pixel image", "Corrupt PNG");
  5045. if (!pal_img_n) {
  5046. s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
  5047. if ((1 << 30) / s->img_x / s->img_n < s->img_y)
  5048. return stbi__err("too large", "Image too large to decode");
  5049. if (scan == STBI__SCAN_header)
  5050. return 1;
  5051. } else {
  5052. // if paletted, then pal_n is our final components, and
  5053. // img_n is # components to decompress/filter.
  5054. s->img_n = 1;
  5055. if ((1 << 30) / s->img_x / 4 < s->img_y)
  5056. return stbi__err("too large", "Corrupt PNG");
  5057. // if SCAN_header, have to scan to see if we have a tRNS
  5058. }
  5059. break;
  5060. }
  5061. case STBI__PNG_TYPE('P', 'L', 'T', 'E'): {
  5062. if (first)
  5063. return stbi__err("first not IHDR", "Corrupt PNG");
  5064. if (c.length > 256 * 3)
  5065. return stbi__err("invalid PLTE", "Corrupt PNG");
  5066. pal_len = c.length / 3;
  5067. if (pal_len * 3 != c.length)
  5068. return stbi__err("invalid PLTE", "Corrupt PNG");
  5069. for (i = 0; i < pal_len; ++i) {
  5070. palette[i * 4 + 0] = stbi__get8(s);
  5071. palette[i * 4 + 1] = stbi__get8(s);
  5072. palette[i * 4 + 2] = stbi__get8(s);
  5073. palette[i * 4 + 3] = 255;
  5074. }
  5075. break;
  5076. }
  5077. case STBI__PNG_TYPE('t', 'R', 'N', 'S'): {
  5078. if (first)
  5079. return stbi__err("first not IHDR", "Corrupt PNG");
  5080. if (z->idata)
  5081. return stbi__err("tRNS after IDAT", "Corrupt PNG");
  5082. if (pal_img_n) {
  5083. if (scan == STBI__SCAN_header) {
  5084. s->img_n = 4;
  5085. return 1;
  5086. }
  5087. if (pal_len == 0)
  5088. return stbi__err("tRNS before PLTE", "Corrupt PNG");
  5089. if (c.length > pal_len)
  5090. return stbi__err("bad tRNS len", "Corrupt PNG");
  5091. pal_img_n = 4;
  5092. for (i = 0; i < c.length; ++i)
  5093. palette[i * 4 + 3] = stbi__get8(s);
  5094. } else {
  5095. if (!(s->img_n & 1))
  5096. return stbi__err("tRNS with alpha", "Corrupt PNG");
  5097. if (c.length != (stbi__uint32)s->img_n * 2)
  5098. return stbi__err("bad tRNS len", "Corrupt PNG");
  5099. has_trans = 1;
  5100. if (z->depth == 16) {
  5101. for (k = 0; k < s->img_n; ++k)
  5102. tc16[k] = (stbi__uint16)stbi__get16be(
  5103. s); // copy the values as-is
  5104. } else {
  5105. for (k = 0; k < s->img_n; ++k)
  5106. tc[k] = (stbi_uc)(stbi__get16be(s) & 255) *
  5107. stbi__depth_scale_table
  5108. [z->depth]; // non 8-bit images will be
  5109. // larger
  5110. }
  5111. }
  5112. break;
  5113. }
  5114. case STBI__PNG_TYPE('I', 'D', 'A', 'T'): {
  5115. if (first)
  5116. return stbi__err("first not IHDR", "Corrupt PNG");
  5117. if (pal_img_n && !pal_len)
  5118. return stbi__err("no PLTE", "Corrupt PNG");
  5119. if (scan == STBI__SCAN_header) {
  5120. s->img_n = pal_img_n;
  5121. return 1;
  5122. }
  5123. if ((int)(ioff + c.length) < (int)ioff)
  5124. return 0;
  5125. if (ioff + c.length > idata_limit) {
  5126. stbi__uint32 idata_limit_old = idata_limit;
  5127. stbi_uc* p;
  5128. if (idata_limit == 0)
  5129. idata_limit = c.length > 4096 ? c.length : 4096;
  5130. while (ioff + c.length > idata_limit)
  5131. idata_limit *= 2;
  5132. STBI_NOTUSED(idata_limit_old);
  5133. p = (stbi_uc*)STBI_REALLOC_SIZED(
  5134. z->idata, idata_limit_old, idata_limit);
  5135. if (p == NULL)
  5136. return stbi__err("outofmem", "Out of memory");
  5137. z->idata = p;
  5138. }
  5139. if (!stbi__getn(s, z->idata + ioff, c.length))
  5140. return stbi__err("outofdata", "Corrupt PNG");
  5141. ioff += c.length;
  5142. break;
  5143. }
  5144. case STBI__PNG_TYPE('I', 'E', 'N', 'D'): {
  5145. stbi__uint32 raw_len, bpl;
  5146. if (first)
  5147. return stbi__err("first not IHDR", "Corrupt PNG");
  5148. if (scan != STBI__SCAN_load)
  5149. return 1;
  5150. if (z->idata == NULL)
  5151. return stbi__err("no IDAT", "Corrupt PNG");
  5152. // initial guess for decoded data size to avoid unnecessary reallocs
  5153. bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component
  5154. raw_len = bpl * s->img_y * s->img_n /* pixels */ +
  5155. s->img_y /* filter mode per row */;
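// e.g. (illustrative numbers) a 100x50 8-bit RGB image gives
// bpl = (100*8 + 7)/8 = 100 bytes per line per channel and
// raw_len = 100*50*3 + 50 = 15050 bytes, one extra byte per row being the
// PNG filter-type byte.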
  5156. z->expanded = (stbi_uc*)stbi_zlib_decode_malloc_guesssize_headerflag(
  5157. (char*)z->idata, ioff, raw_len, (int*)&raw_len, !is_iphone);
  5158. if (z->expanded == NULL)
  5159. return 0; // zlib should set error
  5160. STBI_FREE(z->idata);
  5161. z->idata = NULL;
  5162. if ((req_comp == s->img_n + 1 && req_comp != 3 && !pal_img_n) ||
  5163. has_trans)
  5164. s->img_out_n = s->img_n + 1;
  5165. else
  5166. s->img_out_n = s->img_n;
  5167. if (!stbi__create_png_image(
  5168. z, z->expanded, raw_len, s->img_out_n, z->depth, color,
  5169. interlace))
  5170. return 0;
  5171. if (has_trans) {
  5172. if (z->depth == 16) {
  5173. if (!stbi__compute_transparency16(z, tc16, s->img_out_n))
  5174. return 0;
  5175. } else {
  5176. if (!stbi__compute_transparency(z, tc, s->img_out_n))
  5177. return 0;
  5178. }
  5179. }
  5180. if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2)
  5181. stbi__de_iphone(z);
  5182. if (pal_img_n) {
  5183. // pal_img_n == 3 or 4
  5184. s->img_n = pal_img_n; // record the actual colors we had
  5185. s->img_out_n = pal_img_n;
  5186. if (req_comp >= 3)
  5187. s->img_out_n = req_comp;
  5188. if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n))
  5189. return 0;
  5190. } else if (has_trans) {
  5191. // non-paletted image with tRNS -> source image has (constant) alpha
  5192. ++s->img_n;
  5193. }
  5194. STBI_FREE(z->expanded);
  5195. z->expanded = NULL;
  5196. // end of PNG chunk, read and skip CRC
  5197. stbi__get32be(s);
  5198. return 1;
  5199. }
  5200. default:
  5201. // if critical, fail
  5202. if (first)
  5203. return stbi__err("first not IHDR", "Corrupt PNG");
  5204. if ((c.type & (1 << 29)) == 0) {
  5205. #ifndef STBI_NO_FAILURE_STRINGS
  5206. // not threadsafe
  5207. static char invalid_chunk[] = "XXXX PNG chunk not known";
  5208. invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
  5209. invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
  5210. invalid_chunk[2] = STBI__BYTECAST(c.type >> 8);
  5211. invalid_chunk[3] = STBI__BYTECAST(c.type >> 0);
  5212. #endif
  5213. return stbi__err(
  5214. invalid_chunk, "PNG not supported: unknown PNG chunk type");
  5215. }
  5216. stbi__skip(s, c.length);
  5217. break;
  5218. }
  5219. // end of PNG chunk, read and skip CRC
  5220. stbi__get32be(s);
  5221. }
  5222. }
  5223. static void* stbi__do_png(
  5224. stbi__png* p, int* x, int* y, int* n, int req_comp, stbi__result_info* ri) {
  5225. void* result = NULL;
  5226. if (req_comp < 0 || req_comp > 4)
  5227. return stbi__errpuc("bad req_comp", "Internal error");
  5228. if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) {
  5229. if (p->depth <= 8)
  5230. ri->bits_per_channel = 8;
  5231. else if (p->depth == 16)
  5232. ri->bits_per_channel = 16;
  5233. else
  5234. return stbi__errpuc(
  5235. "bad bits_per_channel",
  5236. "PNG not supported: unsupported color depth");
  5237. result = p->out;
  5238. p->out = NULL;
  5239. if (req_comp && req_comp != p->s->img_out_n) {
  5240. if (ri->bits_per_channel == 8)
  5241. result = stbi__convert_format(
  5242. (unsigned char*)result, p->s->img_out_n, req_comp, p->s->img_x,
  5243. p->s->img_y);
  5244. else
  5245. result = stbi__convert_format16(
  5246. (stbi__uint16*)result, p->s->img_out_n, req_comp, p->s->img_x,
  5247. p->s->img_y);
  5248. p->s->img_out_n = req_comp;
  5249. if (result == NULL)
  5250. return result;
  5251. }
  5252. *x = p->s->img_x;
  5253. *y = p->s->img_y;
  5254. if (n)
  5255. *n = p->s->img_n;
  5256. }
  5257. STBI_FREE(p->out);
  5258. p->out = NULL;
  5259. STBI_FREE(p->expanded);
  5260. p->expanded = NULL;
  5261. STBI_FREE(p->idata);
  5262. p->idata = NULL;
  5263. return result;
  5264. }
  5265. static void* stbi__png_load(
  5266. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  5267. stbi__result_info* ri) {
  5268. stbi__png p;
  5269. p.s = s;
  5270. return stbi__do_png(&p, x, y, comp, req_comp, ri);
  5271. }
  5272. static int stbi__png_test(stbi__context* s) {
  5273. int r;
  5274. r = stbi__check_png_header(s);
  5275. stbi__rewind(s);
  5276. return r;
  5277. }
  5278. static int stbi__png_info_raw(stbi__png* p, int* x, int* y, int* comp) {
  5279. if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
  5280. stbi__rewind(p->s);
  5281. return 0;
  5282. }
  5283. if (x)
  5284. *x = p->s->img_x;
  5285. if (y)
  5286. *y = p->s->img_y;
  5287. if (comp)
  5288. *comp = p->s->img_n;
  5289. return 1;
  5290. }
  5291. static int stbi__png_info(stbi__context* s, int* x, int* y, int* comp) {
  5292. stbi__png p;
  5293. p.s = s;
  5294. return stbi__png_info_raw(&p, x, y, comp);
  5295. }
  5296. static int stbi__png_is16(stbi__context* s) {
  5297. stbi__png p;
  5298. p.s = s;
  5299. if (!stbi__png_info_raw(&p, NULL, NULL, NULL))
  5300. return 0;
  5301. if (p.depth != 16) {
  5302. stbi__rewind(p.s);
  5303. return 0;
  5304. }
  5305. return 1;
  5306. }
  5307. #endif
  5308. // Microsoft/Windows BMP image
  5309. #ifndef STBI_NO_BMP
  5310. static int stbi__bmp_test_raw(stbi__context* s) {
  5311. int r;
  5312. int sz;
  5313. if (stbi__get8(s) != 'B')
  5314. return 0;
  5315. if (stbi__get8(s) != 'M')
  5316. return 0;
  5317. stbi__get32le(s); // discard filesize
  5318. stbi__get16le(s); // discard reserved
  5319. stbi__get16le(s); // discard reserved
  5320. stbi__get32le(s); // discard data offset
  5321. sz = stbi__get32le(s);
  5322. r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
  5323. return r;
  5324. }
  5325. static int stbi__bmp_test(stbi__context* s) {
  5326. int r = stbi__bmp_test_raw(s);
  5327. stbi__rewind(s);
  5328. return r;
  5329. }
5330. // returns 0..31 for the position of the highest set bit, or -1 if z is 0
  5331. static int stbi__high_bit(unsigned int z) {
  5332. int n = 0;
  5333. if (z == 0)
  5334. return -1;
  5335. if (z >= 0x10000) {
  5336. n += 16;
  5337. z >>= 16;
  5338. }
  5339. if (z >= 0x00100) {
  5340. n += 8;
  5341. z >>= 8;
  5342. }
  5343. if (z >= 0x00010) {
  5344. n += 4;
  5345. z >>= 4;
  5346. }
  5347. if (z >= 0x00004) {
  5348. n += 2;
  5349. z >>= 2;
  5350. }
  5351. if (z >= 0x00002) {
  5352. n += 1; /* >>= 1;*/
  5353. }
  5354. return n;
  5355. }
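// Illustrative: stbi__high_bit(0x7C00) == 14, so for a 5-5-5 BMP red mask the
// shift computed later (stbi__high_bit(mr) - 7) is 7, which moves the field's
// most significant bit into bit 7 of a byte.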
  5356. static int stbi__bitcount(unsigned int a) {
  5357. a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2
  5358. a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4
  5359. a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
  5360. a = (a + (a >> 8)); // max 16 per 8 bits
  5361. a = (a + (a >> 16)); // max 32 per 8 bits
  5362. return a & 0xff;
  5363. }
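// Illustrative: stbi__bitcount(0x7C00) == 5 (the width of a 5-bit BMP color
// field) and stbi__bitcount(0x00FF0000) == 8.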
  5364. // extract an arbitrarily-aligned N-bit value (N=bits)
  5365. // from v, and then make it 8-bits long and fractionally
5366. // extend it to full range.
  5367. static int stbi__shiftsigned(unsigned int v, int shift, int bits) {
  5368. static unsigned int mul_table[9] = {
  5369. 0,
  5370. 0xff /*0b11111111*/,
  5371. 0x55 /*0b01010101*/,
  5372. 0x49 /*0b01001001*/,
  5373. 0x11 /*0b00010001*/,
  5374. 0x21 /*0b00100001*/,
  5375. 0x41 /*0b01000001*/,
  5376. 0x81 /*0b10000001*/,
  5377. 0x01 /*0b00000001*/,
  5378. };
  5379. static unsigned int shift_table[9] = {
  5380. 0, 0, 0, 1, 0, 2, 4, 6, 0,
  5381. };
  5382. if (shift < 0)
  5383. v <<= -shift;
  5384. else
  5385. v >>= shift;
  5386. STBI_ASSERT(v < 256);
  5387. v >>= (8 - bits);
  5388. STBI_ASSERT(bits >= 0 && bits <= 8);
  5389. return (int)((unsigned)v * mul_table[bits]) >> shift_table[bits];
  5390. }
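// Worked example (illustrative): for the 5-bit red field of an X1R5G5B5
// pixel, mask 0x7C00 gives shift 7 and bits 5; a field value of 31 becomes
// v = (0x7C00 >> 7) >> 3 = 31, then (31 * 0x21) >> 2 = 255, i.e. the 5-bit
// value replicated to 8 bits ((v << 3) | (v >> 2)).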
  5391. typedef struct {
  5392. int bpp, offset, hsz;
  5393. unsigned int mr, mg, mb, ma, all_a;
  5394. int extra_read;
  5395. } stbi__bmp_data;
  5396. static int stbi__bmp_set_mask_defaults(stbi__bmp_data* info, int compress) {
  5397. // BI_BITFIELDS specifies masks explicitly, don't override
  5398. if (compress == 3)
  5399. return 1;
  5400. if (compress == 0) {
  5401. if (info->bpp == 16) {
  5402. info->mr = 31u << 10;
  5403. info->mg = 31u << 5;
  5404. info->mb = 31u << 0;
  5405. } else if (info->bpp == 32) {
  5406. info->mr = 0xffu << 16;
  5407. info->mg = 0xffu << 8;
  5408. info->mb = 0xffu << 0;
  5409. info->ma = 0xffu << 24;
  5410. info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but
  5411. // it was all 0
  5412. } else {
  5413. // otherwise, use defaults, which is all-0
  5414. info->mr = info->mg = info->mb = info->ma = 0;
  5415. }
  5416. return 1;
  5417. }
  5418. return 0; // error
  5419. }
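// Illustrative: without BI_BITFIELDS the defaults above are the classic
// X1R5G5B5 layout for 16 bpp (mr=0x7C00, mg=0x03E0, mb=0x001F) and
// B8G8R8A8 for 32 bpp (mr=0x00FF0000, mg=0x0000FF00, mb=0x000000FF,
// ma=0xFF000000).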
  5420. static void* stbi__bmp_parse_header(stbi__context* s, stbi__bmp_data* info) {
  5421. int hsz;
  5422. if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M')
  5423. return stbi__errpuc("not BMP", "Corrupt BMP");
  5424. stbi__get32le(s); // discard filesize
  5425. stbi__get16le(s); // discard reserved
  5426. stbi__get16le(s); // discard reserved
  5427. info->offset = stbi__get32le(s);
  5428. info->hsz = hsz = stbi__get32le(s);
  5429. info->mr = info->mg = info->mb = info->ma = 0;
  5430. info->extra_read = 14;
  5431. if (info->offset < 0)
  5432. return stbi__errpuc("bad BMP", "bad BMP");
  5433. if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124)
  5434. return stbi__errpuc("unknown BMP", "BMP type not supported: unknown");
  5435. if (hsz == 12) {
  5436. s->img_x = stbi__get16le(s);
  5437. s->img_y = stbi__get16le(s);
  5438. } else {
  5439. s->img_x = stbi__get32le(s);
  5440. s->img_y = stbi__get32le(s);
  5441. }
  5442. if (stbi__get16le(s) != 1)
  5443. return stbi__errpuc("bad BMP", "bad BMP");
  5444. info->bpp = stbi__get16le(s);
  5445. if (hsz != 12) {
  5446. int compress = stbi__get32le(s);
  5447. if (compress == 1 || compress == 2)
  5448. return stbi__errpuc("BMP RLE", "BMP type not supported: RLE");
  5449. if (compress >= 4)
  5450. return stbi__errpuc(
  5451. "BMP JPEG/PNG",
  5452. "BMP type not supported: unsupported compression"); // this includes
  5453. // PNG/JPEG modes
  5454. if (compress == 3 && info->bpp != 16 && info->bpp != 32)
  5455. return stbi__errpuc(
  5456. "bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel
  5457. stbi__get32le(s); // discard sizeof
  5458. stbi__get32le(s); // discard hres
  5459. stbi__get32le(s); // discard vres
  5460. stbi__get32le(s); // discard colorsused
  5461. stbi__get32le(s); // discard max important
  5462. if (hsz == 40 || hsz == 56) {
  5463. if (hsz == 56) {
  5464. stbi__get32le(s);
  5465. stbi__get32le(s);
  5466. stbi__get32le(s);
  5467. stbi__get32le(s);
  5468. }
  5469. if (info->bpp == 16 || info->bpp == 32) {
  5470. if (compress == 0) {
  5471. stbi__bmp_set_mask_defaults(info, compress);
  5472. } else if (compress == 3) {
  5473. info->mr = stbi__get32le(s);
  5474. info->mg = stbi__get32le(s);
  5475. info->mb = stbi__get32le(s);
  5476. info->extra_read += 12;
  5477. // not documented, but generated by photoshop and handled by mspaint
  5478. if (info->mr == info->mg && info->mg == info->mb) {
  5479. // ?!?!?
  5480. return stbi__errpuc("bad BMP", "bad BMP");
  5481. }
  5482. } else
  5483. return stbi__errpuc("bad BMP", "bad BMP");
  5484. }
  5485. } else {
  5486. // V4/V5 header
  5487. int i;
  5488. if (hsz != 108 && hsz != 124)
  5489. return stbi__errpuc("bad BMP", "bad BMP");
  5490. info->mr = stbi__get32le(s);
  5491. info->mg = stbi__get32le(s);
  5492. info->mb = stbi__get32le(s);
  5493. info->ma = stbi__get32le(s);
  5494. if (compress !=
  5495. 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs
  5496. stbi__bmp_set_mask_defaults(info, compress);
  5497. stbi__get32le(s); // discard color space
  5498. for (i = 0; i < 12; ++i)
  5499. stbi__get32le(s); // discard color space parameters
  5500. if (hsz == 124) {
  5501. stbi__get32le(s); // discard rendering intent
  5502. stbi__get32le(s); // discard offset of profile data
  5503. stbi__get32le(s); // discard size of profile data
  5504. stbi__get32le(s); // discard reserved
  5505. }
  5506. }
  5507. }
  5508. return (void*)1;
  5509. }
  5510. static void* stbi__bmp_load(
  5511. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  5512. stbi__result_info* ri) {
  5513. stbi_uc* out;
  5514. unsigned int mr = 0, mg = 0, mb = 0, ma = 0, all_a;
  5515. stbi_uc pal[256][4];
  5516. int psize = 0, i, j, width;
  5517. int flip_vertically, pad, target;
  5518. stbi__bmp_data info;
  5519. STBI_NOTUSED(ri);
  5520. info.all_a = 255;
  5521. if (stbi__bmp_parse_header(s, &info) == NULL)
  5522. return NULL; // error code already set
  5523. flip_vertically = ((int)s->img_y) > 0;
  5524. s->img_y = abs((int)s->img_y);
  5525. if (s->img_y > STBI_MAX_DIMENSIONS)
  5526. return stbi__errpuc("too large", "Very large image (corrupt?)");
  5527. if (s->img_x > STBI_MAX_DIMENSIONS)
  5528. return stbi__errpuc("too large", "Very large image (corrupt?)");
  5529. mr = info.mr;
  5530. mg = info.mg;
  5531. mb = info.mb;
  5532. ma = info.ma;
  5533. all_a = info.all_a;
  5534. if (info.hsz == 12) {
  5535. if (info.bpp < 24)
  5536. psize = (info.offset - info.extra_read - 24) / 3;
  5537. } else {
  5538. if (info.bpp < 16)
  5539. psize = (info.offset - info.extra_read - info.hsz) >> 2;
  5540. }
  5541. if (psize == 0) {
  5542. if (info.offset !=
  5543. s->callback_already_read + (s->img_buffer - s->img_buffer_original)) {
  5544. return stbi__errpuc("bad offset", "Corrupt BMP");
  5545. }
  5546. }
  5547. if (info.bpp == 24 && ma == 0xff000000)
  5548. s->img_n = 3;
  5549. else
  5550. s->img_n = ma ? 4 : 3;
  5551. if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
  5552. target = req_comp;
  5553. else
  5554. target = s->img_n; // if they want monochrome, we'll post-convert
  5555. // sanity-check size
  5556. if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
  5557. return stbi__errpuc("too large", "Corrupt BMP");
  5558. out = (stbi_uc*)stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
  5559. if (!out)
  5560. return stbi__errpuc("outofmem", "Out of memory");
  5561. if (info.bpp < 16) {
  5562. int z = 0;
  5563. if (psize == 0 || psize > 256) {
  5564. STBI_FREE(out);
  5565. return stbi__errpuc("invalid", "Corrupt BMP");
  5566. }
  5567. for (i = 0; i < psize; ++i) {
  5568. pal[i][2] = stbi__get8(s);
  5569. pal[i][1] = stbi__get8(s);
  5570. pal[i][0] = stbi__get8(s);
  5571. if (info.hsz != 12)
  5572. stbi__get8(s);
  5573. pal[i][3] = 255;
  5574. }
  5575. stbi__skip(
  5576. s, info.offset - info.extra_read - info.hsz -
  5577. psize * (info.hsz == 12 ? 3 : 4));
  5578. if (info.bpp == 1)
  5579. width = (s->img_x + 7) >> 3;
  5580. else if (info.bpp == 4)
  5581. width = (s->img_x + 1) >> 1;
  5582. else if (info.bpp == 8)
  5583. width = s->img_x;
  5584. else {
  5585. STBI_FREE(out);
  5586. return stbi__errpuc("bad bpp", "Corrupt BMP");
  5587. }
  5588. pad = (-width) & 3;
  5589. if (info.bpp == 1) {
  5590. for (j = 0; j < (int)s->img_y; ++j) {
  5591. int bit_offset = 7, v = stbi__get8(s);
  5592. for (i = 0; i < (int)s->img_x; ++i) {
  5593. int color = (v >> bit_offset) & 0x1;
  5594. out[z++] = pal[color][0];
  5595. out[z++] = pal[color][1];
  5596. out[z++] = pal[color][2];
  5597. if (target == 4)
  5598. out[z++] = 255;
  5599. if (i + 1 == (int)s->img_x)
  5600. break;
  5601. if ((--bit_offset) < 0) {
  5602. bit_offset = 7;
  5603. v = stbi__get8(s);
  5604. }
  5605. }
  5606. stbi__skip(s, pad);
  5607. }
  5608. } else {
  5609. for (j = 0; j < (int)s->img_y; ++j) {
  5610. for (i = 0; i < (int)s->img_x; i += 2) {
  5611. int v = stbi__get8(s), v2 = 0;
  5612. if (info.bpp == 4) {
  5613. v2 = v & 15;
  5614. v >>= 4;
  5615. }
  5616. out[z++] = pal[v][0];
  5617. out[z++] = pal[v][1];
  5618. out[z++] = pal[v][2];
  5619. if (target == 4)
  5620. out[z++] = 255;
  5621. if (i + 1 == (int)s->img_x)
  5622. break;
  5623. v = (info.bpp == 8) ? stbi__get8(s) : v2;
  5624. out[z++] = pal[v][0];
  5625. out[z++] = pal[v][1];
  5626. out[z++] = pal[v][2];
  5627. if (target == 4)
  5628. out[z++] = 255;
  5629. }
  5630. stbi__skip(s, pad);
  5631. }
  5632. }
  5633. } else {
  5634. int rshift = 0, gshift = 0, bshift = 0, ashift = 0, rcount = 0, gcount = 0,
  5635. bcount = 0, acount = 0;
  5636. int z = 0;
  5637. int easy = 0;
  5638. stbi__skip(s, info.offset - info.extra_read - info.hsz);
  5639. if (info.bpp == 24)
  5640. width = 3 * s->img_x;
  5641. else if (info.bpp == 16)
  5642. width = 2 * s->img_x;
  5643. else /* bpp = 32 and pad = 0 */
  5644. width = 0;
  5645. pad = (-width) & 3;
  5646. if (info.bpp == 24) {
  5647. easy = 1;
  5648. } else if (info.bpp == 32) {
  5649. if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
  5650. easy = 2;
  5651. }
  5652. if (!easy) {
  5653. if (!mr || !mg || !mb) {
  5654. STBI_FREE(out);
  5655. return stbi__errpuc("bad masks", "Corrupt BMP");
  5656. }
  5657. // right shift amt to put high bit in position #7
  5658. rshift = stbi__high_bit(mr) - 7;
  5659. rcount = stbi__bitcount(mr);
  5660. gshift = stbi__high_bit(mg) - 7;
  5661. gcount = stbi__bitcount(mg);
  5662. bshift = stbi__high_bit(mb) - 7;
  5663. bcount = stbi__bitcount(mb);
  5664. ashift = stbi__high_bit(ma) - 7;
  5665. acount = stbi__bitcount(ma);
  5666. if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) {
  5667. STBI_FREE(out);
  5668. return stbi__errpuc("bad masks", "Corrupt BMP");
  5669. }
  5670. }
  5671. for (j = 0; j < (int)s->img_y; ++j) {
  5672. if (easy) {
  5673. for (i = 0; i < (int)s->img_x; ++i) {
  5674. unsigned char a;
  5675. out[z + 2] = stbi__get8(s);
  5676. out[z + 1] = stbi__get8(s);
  5677. out[z + 0] = stbi__get8(s);
  5678. z += 3;
  5679. a = (easy == 2 ? stbi__get8(s) : 255);
  5680. all_a |= a;
  5681. if (target == 4)
  5682. out[z++] = a;
  5683. }
  5684. } else {
  5685. int bpp = info.bpp;
  5686. for (i = 0; i < (int)s->img_x; ++i) {
  5687. stbi__uint32 v =
  5688. (bpp == 16 ? (stbi__uint32)stbi__get16le(s)
  5689. : stbi__get32le(s));
  5690. unsigned int a;
  5691. out[z++] =
  5692. STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
  5693. out[z++] =
  5694. STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
  5695. out[z++] =
  5696. STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
  5697. a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
  5698. all_a |= a;
  5699. if (target == 4)
  5700. out[z++] = STBI__BYTECAST(a);
  5701. }
  5702. }
  5703. stbi__skip(s, pad);
  5704. }
  5705. }
  5706. // if alpha channel is all 0s, replace with all 255s
  5707. if (target == 4 && all_a == 0)
  5708. for (i = 4 * s->img_x * s->img_y - 1; i >= 0; i -= 4)
  5709. out[i] = 255;
  5710. if (flip_vertically) {
  5711. stbi_uc t;
  5712. for (j = 0; j < (int)s->img_y >> 1; ++j) {
  5713. stbi_uc* p1 = out + j * s->img_x * target;
  5714. stbi_uc* p2 = out + (s->img_y - 1 - j) * s->img_x * target;
  5715. for (i = 0; i < (int)s->img_x * target; ++i) {
  5716. t = p1[i];
  5717. p1[i] = p2[i];
  5718. p2[i] = t;
  5719. }
  5720. }
  5721. }
  5722. if (req_comp && req_comp != target) {
  5723. out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
  5724. if (out == NULL)
  5725. return out; // stbi__convert_format frees input on failure
  5726. }
  5727. *x = s->img_x;
  5728. *y = s->img_y;
  5729. if (comp)
  5730. *comp = s->img_n;
  5731. return out;
  5732. }
  5733. #endif
  5734. // Targa Truevision - TGA
  5735. // by Jonathan Dummer
  5736. #ifndef STBI_NO_TGA
5737. // returns the component count (STBI_grey, STBI_grey_alpha, STBI_rgb or STBI_rgb_alpha), 0 on error
  5738. static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) {
  5739. // only RGB or RGBA (incl. 16bit) or grey allowed
  5740. if (is_rgb16)
  5741. *is_rgb16 = 0;
  5742. switch (bits_per_pixel) {
  5743. case 8:
  5744. return STBI_grey;
  5745. case 16:
  5746. if (is_grey)
  5747. return STBI_grey_alpha;
  5748. // fallthrough
  5749. case 15:
  5750. if (is_rgb16)
  5751. *is_rgb16 = 1;
  5752. return STBI_rgb;
  5753. case 24: // fallthrough
  5754. case 32:
  5755. return bits_per_pixel / 8;
  5756. default:
  5757. return 0;
  5758. }
  5759. }
  5760. static int stbi__tga_info(stbi__context* s, int* x, int* y, int* comp) {
  5761. int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
  5762. int sz, tga_colormap_type;
  5763. stbi__get8(s); // discard Offset
  5764. tga_colormap_type = stbi__get8(s); // colormap type
  5765. if (tga_colormap_type > 1) {
  5766. stbi__rewind(s);
  5767. return 0; // only RGB or indexed allowed
  5768. }
  5769. tga_image_type = stbi__get8(s); // image type
  5770. if (tga_colormap_type == 1) { // colormapped (paletted) image
  5771. if (tga_image_type != 1 && tga_image_type != 9) {
  5772. stbi__rewind(s);
  5773. return 0;
  5774. }
  5775. stbi__skip(s, 4); // skip index of first colormap entry and number of entries
  5776. sz = stbi__get8(s); // check bits per palette color entry
  5777. if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) {
  5778. stbi__rewind(s);
  5779. return 0;
  5780. }
  5781. stbi__skip(s, 4); // skip image x and y origin
  5782. tga_colormap_bpp = sz;
  5783. } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
  5784. if ((tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) &&
  5785. (tga_image_type != 11)) {
  5786. stbi__rewind(s);
  5787. return 0; // only RGB or grey allowed, +/- RLE
  5788. }
  5789. stbi__skip(s, 9); // skip colormap specification and image x/y origin
  5790. tga_colormap_bpp = 0;
  5791. }
  5792. tga_w = stbi__get16le(s);
  5793. if (tga_w < 1) {
  5794. stbi__rewind(s);
  5795. return 0; // test width
  5796. }
  5797. tga_h = stbi__get16le(s);
  5798. if (tga_h < 1) {
  5799. stbi__rewind(s);
  5800. return 0; // test height
  5801. }
  5802. tga_bits_per_pixel = stbi__get8(s); // bits per pixel
  5803. stbi__get8(s); // ignore alpha bits
  5804. if (tga_colormap_bpp != 0) {
  5805. if ((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) {
  5806. // when using a colormap, tga_bits_per_pixel is the size of the indexes
  5807. // I don't think anything but 8 or 16bit indexes makes sense
  5808. stbi__rewind(s);
  5809. return 0;
  5810. }
  5811. tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
  5812. } else {
  5813. tga_comp = stbi__tga_get_comp(
  5814. tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11),
  5815. NULL);
  5816. }
  5817. if (!tga_comp) {
  5818. stbi__rewind(s);
  5819. return 0;
  5820. }
  5821. if (x)
  5822. *x = tga_w;
  5823. if (y)
  5824. *y = tga_h;
  5825. if (comp)
  5826. *comp = tga_comp;
  5827. return 1; // seems to have passed everything
  5828. }
  5829. static int stbi__tga_test(stbi__context* s) {
  5830. int res = 0;
  5831. int sz, tga_color_type;
  5832. stbi__get8(s); // discard Offset
  5833. tga_color_type = stbi__get8(s); // color type
  5834. if (tga_color_type > 1)
  5835. goto errorEnd; // only RGB or indexed allowed
  5836. sz = stbi__get8(s); // image type
  5837. if (tga_color_type == 1) { // colormapped (paletted) image
  5838. if (sz != 1 && sz != 9)
  5839. goto errorEnd; // colortype 1 demands image type 1 or 9
  5840. stbi__skip(s, 4); // skip index of first colormap entry and number of entries
  5841. sz = stbi__get8(s); // check bits per palette color entry
  5842. if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
  5843. goto errorEnd;
  5844. stbi__skip(s, 4); // skip image x and y origin
  5845. } else { // "normal" image w/o colormap
  5846. if ((sz != 2) && (sz != 3) && (sz != 10) && (sz != 11))
  5847. goto errorEnd; // only RGB or grey allowed, +/- RLE
  5848. stbi__skip(s, 9); // skip colormap specification and image x/y origin
  5849. }
  5850. if (stbi__get16le(s) < 1)
  5851. goto errorEnd; // test width
  5852. if (stbi__get16le(s) < 1)
  5853. goto errorEnd; // test height
  5854. sz = stbi__get8(s); // bits per pixel
  5855. if ((tga_color_type == 1) && (sz != 8) && (sz != 16))
  5856. goto errorEnd; // for colormapped images, bpp is size of an index
  5857. if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
  5858. goto errorEnd;
  5859. res = 1; // if we got this far, everything's good and we can return 1 instead of 0
  5860. errorEnd:
  5861. stbi__rewind(s);
  5862. return res;
  5863. }
  5864. // read 16bit value and convert to 24bit RGB
  5865. static void stbi__tga_read_rgb16(stbi__context* s, stbi_uc* out) {
  5866. stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
  5867. stbi__uint16 fiveBitMask = 31;
  5868. // we have 3 channels with 5bits each
  5869. int r = (px >> 10) & fiveBitMask;
  5870. int g = (px >> 5) & fiveBitMask;
  5871. int b = px & fiveBitMask;
  5872. // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped
  5873. // later
  5874. out[0] = (stbi_uc)((r * 255) / 31);
  5875. out[1] = (stbi_uc)((g * 255) / 31);
  5876. out[2] = (stbi_uc)((b * 255) / 31);
  5877. // some people claim that the most significant bit might be used for alpha
  5878. // (possibly if an alpha-bit is set in the "image descriptor byte")
  5879. // but that only made 16bit test images completely translucent..
  5880. // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
  5881. }
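// Illustrative: a 16-bit TGA pixel 0x7C1F has r=31, g=0, b=31, which the
// (x*255)/31 scaling above maps to (255, 0, 255), i.e. full-range magenta.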
  5882. static void* stbi__tga_load(
  5883. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  5884. stbi__result_info* ri) {
  5885. // read in the TGA header stuff
  5886. int tga_offset = stbi__get8(s);
  5887. int tga_indexed = stbi__get8(s);
  5888. int tga_image_type = stbi__get8(s);
  5889. int tga_is_RLE = 0;
  5890. int tga_palette_start = stbi__get16le(s);
  5891. int tga_palette_len = stbi__get16le(s);
  5892. int tga_palette_bits = stbi__get8(s);
  5893. int tga_x_origin = stbi__get16le(s);
  5894. int tga_y_origin = stbi__get16le(s);
  5895. int tga_width = stbi__get16le(s);
  5896. int tga_height = stbi__get16le(s);
  5897. int tga_bits_per_pixel = stbi__get8(s);
  5898. int tga_comp, tga_rgb16 = 0;
  5899. int tga_inverted = stbi__get8(s);
  5900. // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
  5901. // image data
  5902. unsigned char* tga_data;
  5903. unsigned char* tga_palette = NULL;
  5904. int i, j;
  5905. unsigned char raw_data[4] = {0};
  5906. int RLE_count = 0;
  5907. int RLE_repeating = 0;
  5908. int read_next_pixel = 1;
  5909. STBI_NOTUSED(ri);
  5910. STBI_NOTUSED(tga_x_origin); // @TODO
  5911. STBI_NOTUSED(tga_y_origin); // @TODO
  5912. if (tga_height > STBI_MAX_DIMENSIONS)
  5913. return stbi__errpuc("too large", "Very large image (corrupt?)");
  5914. if (tga_width > STBI_MAX_DIMENSIONS)
  5915. return stbi__errpuc("too large", "Very large image (corrupt?)");
5916. // do a tiny bit of preprocessing
  5917. if (tga_image_type >= 8) {
  5918. tga_image_type -= 8;
  5919. tga_is_RLE = 1;
  5920. }
  5921. tga_inverted = 1 - ((tga_inverted >> 5) & 1);
  5922. // If I'm paletted, then I'll use the number of bits from the palette
  5923. if (tga_indexed)
  5924. tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
  5925. else
  5926. tga_comp = stbi__tga_get_comp(
  5927. tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);
  5928. if (!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured
  5929. // basic consistency
  5930. return stbi__errpuc("bad format", "Can't find out TGA pixelformat");
  5931. // tga info
  5932. *x = tga_width;
  5933. *y = tga_height;
  5934. if (comp)
  5935. *comp = tga_comp;
  5936. if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
  5937. return stbi__errpuc("too large", "Corrupt TGA");
  5938. tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
  5939. if (!tga_data)
  5940. return stbi__errpuc("outofmem", "Out of memory");
  5941. // skip to the data's starting position (offset usually = 0)
  5942. stbi__skip(s, tga_offset);
  5943. if (!tga_indexed && !tga_is_RLE && !tga_rgb16) {
  5944. for (i = 0; i < tga_height; ++i) {
  5945. int row = tga_inverted ? tga_height - i - 1 : i;
  5946. stbi_uc* tga_row = tga_data + row * tga_width * tga_comp;
  5947. stbi__getn(s, tga_row, tga_width * tga_comp);
  5948. }
  5949. } else {
  5950. // do I need to load a palette?
  5951. if (tga_indexed) {
  5952. if (tga_palette_len == 0) { /* you have to have at least one entry! */
  5953. STBI_FREE(tga_data);
  5954. return stbi__errpuc("bad palette", "Corrupt TGA");
  5955. }
  5956. // any data to skip? (offset usually = 0)
  5957. stbi__skip(s, tga_palette_start);
  5958. // load the palette
  5959. tga_palette =
  5960. (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
  5961. if (!tga_palette) {
  5962. STBI_FREE(tga_data);
  5963. return stbi__errpuc("outofmem", "Out of memory");
  5964. }
  5965. if (tga_rgb16) {
  5966. stbi_uc* pal_entry = tga_palette;
  5967. STBI_ASSERT(tga_comp == STBI_rgb);
  5968. for (i = 0; i < tga_palette_len; ++i) {
  5969. stbi__tga_read_rgb16(s, pal_entry);
  5970. pal_entry += tga_comp;
  5971. }
  5972. } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) {
  5973. STBI_FREE(tga_data);
  5974. STBI_FREE(tga_palette);
  5975. return stbi__errpuc("bad palette", "Corrupt TGA");
  5976. }
  5977. }
  5978. // load the data
  5979. for (i = 0; i < tga_width * tga_height; ++i) {
5980. // if I'm in RLE mode, do I need to read a new RLE packet?
  5981. if (tga_is_RLE) {
  5982. if (RLE_count == 0) {
  5983. // yep, get the next byte as a RLE command
  5984. int RLE_cmd = stbi__get8(s);
  5985. RLE_count = 1 + (RLE_cmd & 127);
  5986. RLE_repeating = RLE_cmd >> 7;
  5987. read_next_pixel = 1;
  5988. } else if (!RLE_repeating) {
  5989. read_next_pixel = 1;
  5990. }
  5991. } else {
  5992. read_next_pixel = 1;
  5993. }
  5994. // OK, if I need to read a pixel, do it now
  5995. if (read_next_pixel) {
  5996. // load however much data we did have
  5997. if (tga_indexed) {
  5998. // read in index, then perform the lookup
  5999. int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s)
  6000. : stbi__get16le(s);
  6001. if (pal_idx >= tga_palette_len) {
  6002. // invalid index
  6003. pal_idx = 0;
  6004. }
  6005. pal_idx *= tga_comp;
  6006. for (j = 0; j < tga_comp; ++j) {
  6007. raw_data[j] = tga_palette[pal_idx + j];
  6008. }
  6009. } else if (tga_rgb16) {
  6010. STBI_ASSERT(tga_comp == STBI_rgb);
  6011. stbi__tga_read_rgb16(s, raw_data);
  6012. } else {
  6013. // read in the data raw
  6014. for (j = 0; j < tga_comp; ++j) {
  6015. raw_data[j] = stbi__get8(s);
  6016. }
  6017. }
  6018. // clear the reading flag for the next pixel
  6019. read_next_pixel = 0;
  6020. } // end of reading a pixel
  6021. // copy data
  6022. for (j = 0; j < tga_comp; ++j)
  6023. tga_data[i * tga_comp + j] = raw_data[j];
  6024. // in case we're in RLE mode, keep counting down
  6025. --RLE_count;
  6026. }
  6027. // do I need to invert the image?
  6028. if (tga_inverted) {
  6029. for (j = 0; j * 2 < tga_height; ++j) {
  6030. int index1 = j * tga_width * tga_comp;
  6031. int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
  6032. for (i = tga_width * tga_comp; i > 0; --i) {
  6033. unsigned char temp = tga_data[index1];
  6034. tga_data[index1] = tga_data[index2];
  6035. tga_data[index2] = temp;
  6036. ++index1;
  6037. ++index2;
  6038. }
  6039. }
  6040. }
  6041. // clear my palette, if I had one
  6042. if (tga_palette != NULL) {
  6043. STBI_FREE(tga_palette);
  6044. }
  6045. }
  6046. // swap RGB - if the source data was RGB16, it already is in the right order
  6047. if (tga_comp >= 3 && !tga_rgb16) {
  6048. unsigned char* tga_pixel = tga_data;
  6049. for (i = 0; i < tga_width * tga_height; ++i) {
  6050. unsigned char temp = tga_pixel[0];
  6051. tga_pixel[0] = tga_pixel[2];
  6052. tga_pixel[2] = temp;
  6053. tga_pixel += tga_comp;
  6054. }
  6055. }
  6056. // convert to target component count
  6057. if (req_comp && req_comp != tga_comp)
  6058. tga_data = stbi__convert_format(
  6059. tga_data, tga_comp, req_comp, tga_width, tga_height);
  6060. // the things I do to get rid of an error message, and yet keep
  6061. // Microsoft's C compilers happy... [8^(
  6062. tga_palette_start = tga_palette_len = tga_palette_bits = tga_x_origin =
  6063. tga_y_origin = 0;
  6064. STBI_NOTUSED(tga_palette_start);
  6065. // OK, done
  6066. return tga_data;
  6067. }
  6068. #endif
  6069. // *************************************************************************************************
  6070. // Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked
  6071. // by STB
  6072. #ifndef STBI_NO_PSD
  6073. static int stbi__psd_test(stbi__context* s) {
  6074. int r = (stbi__get32be(s) == 0x38425053);
  6075. stbi__rewind(s);
  6076. return r;
  6077. }
  6078. static int stbi__psd_decode_rle(stbi__context* s, stbi_uc* p, int pixelCount) {
  6079. int count, nleft, len;
  6080. count = 0;
  6081. while ((nleft = pixelCount - count) > 0) {
  6082. len = stbi__get8(s);
  6083. if (len == 128) {
  6084. // No-op.
  6085. } else if (len < 128) {
  6086. // Copy next len+1 bytes literally.
  6087. len++;
  6088. if (len > nleft)
  6089. return 0; // corrupt data
  6090. count += len;
  6091. while (len) {
  6092. *p = stbi__get8(s);
  6093. p += 4;
  6094. len--;
  6095. }
  6096. } else if (len > 128) {
  6097. stbi_uc val;
  6098. // Next -len+1 bytes in the dest are replicated from next source byte.
  6099. // (Interpret len as a negative 8-bit int.)
  6100. len = 257 - len;
  6101. if (len > nleft)
  6102. return 0; // corrupt data
  6103. val = stbi__get8(s);
  6104. count += len;
  6105. while (len) {
  6106. *p = val;
  6107. p += 4;
  6108. len--;
  6109. }
  6110. }
  6111. }
  6112. return 1;
  6113. }
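// Worked example (illustrative bytes): the RLE stream { 0x02, 'A','B','C',
// 0xFD, 'Z' } unpacks to "ABC" followed by four 'Z's (0xFD = 253 and
// 257 - 253 = 4). p advances by 4 per output byte because each channel is
// interleaved into the 4-channel output buffer.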
  6114. static void* stbi__psd_load(
  6115. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  6116. stbi__result_info* ri, int bpc) {
  6117. int pixelCount;
  6118. int channelCount, compression;
  6119. int channel, i;
  6120. int bitdepth;
  6121. int w, h;
  6122. stbi_uc* out;
  6123. STBI_NOTUSED(ri);
  6124. // Check identifier
  6125. if (stbi__get32be(s) != 0x38425053) // "8BPS"
  6126. return stbi__errpuc("not PSD", "Corrupt PSD image");
  6127. // Check file type version.
  6128. if (stbi__get16be(s) != 1)
  6129. return stbi__errpuc("wrong version", "Unsupported version of PSD image");
  6130. // Skip 6 reserved bytes.
  6131. stbi__skip(s, 6);
  6132. // Read the number of channels (R, G, B, A, etc).
  6133. channelCount = stbi__get16be(s);
  6134. if (channelCount < 0 || channelCount > 16)
  6135. return stbi__errpuc(
  6136. "wrong channel count", "Unsupported number of channels in PSD image");
  6137. // Read the rows and columns of the image.
  6138. h = stbi__get32be(s);
  6139. w = stbi__get32be(s);
  6140. if (h > STBI_MAX_DIMENSIONS)
  6141. return stbi__errpuc("too large", "Very large image (corrupt?)");
  6142. if (w > STBI_MAX_DIMENSIONS)
  6143. return stbi__errpuc("too large", "Very large image (corrupt?)");
6144. // Make sure the depth is 8 or 16 bits.
  6145. bitdepth = stbi__get16be(s);
  6146. if (bitdepth != 8 && bitdepth != 16)
  6147. return stbi__errpuc(
  6148. "unsupported bit depth", "PSD bit depth is not 8 or 16 bit");
  6149. // Make sure the color mode is RGB.
  6150. // Valid options are:
  6151. // 0: Bitmap
  6152. // 1: Grayscale
  6153. // 2: Indexed color
  6154. // 3: RGB color
  6155. // 4: CMYK color
  6156. // 7: Multichannel
  6157. // 8: Duotone
  6158. // 9: Lab color
  6159. if (stbi__get16be(s) != 3)
  6160. return stbi__errpuc("wrong color format", "PSD is not in RGB color format");
  6161. // Skip the Mode Data. (It's the palette for indexed color; other info for other
  6162. // modes.)
  6163. stbi__skip(s, stbi__get32be(s));
  6164. // Skip the image resources. (resolution, pen tool paths, etc)
  6165. stbi__skip(s, stbi__get32be(s));
  6166. // Skip the reserved data.
  6167. stbi__skip(s, stbi__get32be(s));
  6168. // Find out if the data is compressed.
  6169. // Known values:
  6170. // 0: no compression
  6171. // 1: RLE compressed
  6172. compression = stbi__get16be(s);
  6173. if (compression > 1)
  6174. return stbi__errpuc("bad compression", "PSD has an unknown compression format");
  6175. // Check size
  6176. if (!stbi__mad3sizes_valid(4, w, h, 0))
  6177. return stbi__errpuc("too large", "Corrupt PSD");
  6178. // Create the destination image.
  6179. if (!compression && bitdepth == 16 && bpc == 16) {
  6180. out = (stbi_uc*)stbi__malloc_mad3(8, w, h, 0);
  6181. ri->bits_per_channel = 16;
  6182. } else
  6183. out = (stbi_uc*)stbi__malloc(4 * w * h);
  6184. if (!out)
  6185. return stbi__errpuc("outofmem", "Out of memory");
  6186. pixelCount = w * h;
  6187. // Initialize the data to zero.
  6188. // memset( out, 0, pixelCount * 4 );
  6189. // Finally, the image data.
  6190. if (compression) {
  6191. // RLE as used by .PSD and .TIFF
  6192. // Loop until you get the number of unpacked bytes you are expecting:
  6193. // Read the next source byte into n.
  6194. // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
  6195. // Else if n is between -127 and -1 inclusive, copy the next byte -n+1
  6196. // times. Else if n is 128, noop.
  6197. // Endloop
  6198. // The RLE-compressed data is preceded by a 2-byte data count for each row in
  6199. // the data, which we're going to just skip.
  6200. stbi__skip(s, h * channelCount * 2);
  6201. // Read the RLE data by channel.
  6202. for (channel = 0; channel < 4; channel++) {
  6203. stbi_uc* p;
  6204. p = out + channel;
  6205. if (channel >= channelCount) {
  6206. // Fill this channel with default data.
  6207. for (i = 0; i < pixelCount; i++, p += 4)
  6208. *p = (channel == 3 ? 255 : 0);
  6209. } else {
  6210. // Read the RLE data.
  6211. if (!stbi__psd_decode_rle(s, p, pixelCount)) {
  6212. STBI_FREE(out);
  6213. return stbi__errpuc("corrupt", "bad RLE data");
  6214. }
  6215. }
  6216. }
  6217. } else {
  6218. // We're at the raw image data. It's each channel in order (Red, Green, Blue,
  6219. // Alpha, ...) where each channel consists of an 8-bit (or 16-bit) value for
  6220. // each pixel in the image.
  6221. // Read the data by channel.
  6222. for (channel = 0; channel < 4; channel++) {
  6223. if (channel >= channelCount) {
  6224. // Fill this channel with default data.
  6225. if (bitdepth == 16 && bpc == 16) {
  6226. stbi__uint16* q = ((stbi__uint16*)out) + channel;
  6227. stbi__uint16 val = channel == 3 ? 65535 : 0;
  6228. for (i = 0; i < pixelCount; i++, q += 4)
  6229. *q = val;
  6230. } else {
  6231. stbi_uc* p = out + channel;
  6232. stbi_uc val = channel == 3 ? 255 : 0;
  6233. for (i = 0; i < pixelCount; i++, p += 4)
  6234. *p = val;
  6235. }
  6236. } else {
  6237. if (ri->bits_per_channel == 16) { // output bpc
  6238. stbi__uint16* q = ((stbi__uint16*)out) + channel;
  6239. for (i = 0; i < pixelCount; i++, q += 4)
  6240. *q = (stbi__uint16)stbi__get16be(s);
  6241. } else {
  6242. stbi_uc* p = out + channel;
  6243. if (bitdepth == 16) { // input bpc
  6244. for (i = 0; i < pixelCount; i++, p += 4)
  6245. *p = (stbi_uc)(stbi__get16be(s) >> 8);
  6246. } else {
  6247. for (i = 0; i < pixelCount; i++, p += 4)
  6248. *p = stbi__get8(s);
  6249. }
  6250. }
  6251. }
  6252. }
  6253. }
  6254. // remove weird white matte from PSD
  6255. if (channelCount >= 4) {
  6256. if (ri->bits_per_channel == 16) {
  6257. for (i = 0; i < w * h; ++i) {
  6258. stbi__uint16* pixel = (stbi__uint16*)out + 4 * i;
  6259. if (pixel[3] != 0 && pixel[3] != 65535) {
  6260. float a = pixel[3] / 65535.0f;
  6261. float ra = 1.0f / a;
  6262. float inv_a = 65535.0f * (1 - ra);
  6263. pixel[0] = (stbi__uint16)(pixel[0] * ra + inv_a);
  6264. pixel[1] = (stbi__uint16)(pixel[1] * ra + inv_a);
  6265. pixel[2] = (stbi__uint16)(pixel[2] * ra + inv_a);
  6266. }
  6267. }
  6268. } else {
  6269. for (i = 0; i < w * h; ++i) {
  6270. unsigned char* pixel = out + 4 * i;
  6271. if (pixel[3] != 0 && pixel[3] != 255) {
  6272. float a = pixel[3] / 255.0f;
  6273. float ra = 1.0f / a;
  6274. float inv_a = 255.0f * (1 - ra);
  6275. pixel[0] = (unsigned char)(pixel[0] * ra + inv_a);
  6276. pixel[1] = (unsigned char)(pixel[1] * ra + inv_a);
  6277. pixel[2] = (unsigned char)(pixel[2] * ra + inv_a);
  6278. }
  6279. }
  6280. }
  6281. }
  6282. // convert to desired output format
  6283. if (req_comp && req_comp != 4) {
  6284. if (ri->bits_per_channel == 16)
  6285. out = (stbi_uc*)stbi__convert_format16(
  6286. (stbi__uint16*)out, 4, req_comp, w, h);
  6287. else
  6288. out = stbi__convert_format(out, 4, req_comp, w, h);
  6289. if (out == NULL)
  6290. return out; // stbi__convert_format frees input on failure
  6291. }
  6292. if (comp)
  6293. *comp = 4;
  6294. *y = h;
  6295. *x = w;
  6296. return out;
  6297. }
  6298. #endif
  6299. // *************************************************************************************************
  6300. // Softimage PIC loader
  6301. // by Tom Seddon
  6302. //
  6303. // See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format
  6304. // See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/
  6305. #ifndef STBI_NO_PIC
  6306. static int stbi__pic_is4(stbi__context* s, const char* str) {
  6307. int i;
  6308. for (i = 0; i < 4; ++i)
  6309. if (stbi__get8(s) != (stbi_uc)str[i])
  6310. return 0;
  6311. return 1;
  6312. }
  6313. static int stbi__pic_test_core(stbi__context* s) {
  6314. int i;
  6315. if (!stbi__pic_is4(s, "\x53\x80\xF6\x34"))
  6316. return 0;
  6317. for (i = 0; i < 84; ++i)
  6318. stbi__get8(s);
  6319. if (!stbi__pic_is4(s, "PICT"))
  6320. return 0;
  6321. return 1;
  6322. }
  6323. typedef struct {
  6324. stbi_uc size, type, channel;
  6325. } stbi__pic_packet;
  6326. static stbi_uc* stbi__readval(stbi__context* s, int channel, stbi_uc* dest) {
  6327. int mask = 0x80, i;
  6328. for (i = 0; i < 4; ++i, mask >>= 1) {
  6329. if (channel & mask) {
  6330. if (stbi__at_eof(s))
  6331. return stbi__errpuc("bad file", "PIC file too short");
  6332. dest[i] = stbi__get8(s);
  6333. }
  6334. }
  6335. return dest;
  6336. }
  6337. static void stbi__copyval(int channel, stbi_uc* dest, const stbi_uc* src) {
  6338. int mask = 0x80, i;
  6339. for (i = 0; i < 4; ++i, mask >>= 1)
  6340. if (channel & mask)
  6341. dest[i] = src[i];
  6342. }
  6343. static stbi_uc* stbi__pic_load_core(
  6344. stbi__context* s, int width, int height, int* comp, stbi_uc* result) {
  6345. int act_comp = 0, num_packets = 0, y, chained;
  6346. stbi__pic_packet packets[10];
  6347. // this will (should...) cater for even some bizarre stuff like having data
  6348. // for the same channel in multiple packets.
  6349. do {
  6350. stbi__pic_packet* packet;
  6351. if (num_packets == sizeof(packets) / sizeof(packets[0]))
  6352. return stbi__errpuc("bad format", "too many packets");
  6353. packet = &packets[num_packets++];
  6354. chained = stbi__get8(s);
  6355. packet->size = stbi__get8(s);
  6356. packet->type = stbi__get8(s);
  6357. packet->channel = stbi__get8(s);
  6358. act_comp |= packet->channel;
  6359. if (stbi__at_eof(s))
  6360. return stbi__errpuc("bad file", "file too short (reading packets)");
  6361. if (packet->size != 8)
  6362. return stbi__errpuc("bad format", "packet isn't 8bpp");
  6363. } while (chained);
  6364. *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
  6365. for (y = 0; y < height; ++y) {
  6366. int packet_idx;
  6367. for (packet_idx = 0; packet_idx < num_packets; ++packet_idx) {
  6368. stbi__pic_packet* packet = &packets[packet_idx];
  6369. stbi_uc* dest = result + y * width * 4;
  6370. switch (packet->type) {
  6371. default:
  6372. return stbi__errpuc(
  6373. "bad format", "packet has bad compression type");
  6374. case 0: { // uncompressed
  6375. int x;
  6376. for (x = 0; x < width; ++x, dest += 4)
  6377. if (!stbi__readval(s, packet->channel, dest))
  6378. return 0;
  6379. break;
  6380. }
  6381. case 1: // Pure RLE
  6382. {
  6383. int left = width, i;
  6384. while (left > 0) {
  6385. stbi_uc count, value[4];
  6386. count = stbi__get8(s);
  6387. if (stbi__at_eof(s))
  6388. return stbi__errpuc(
  6389. "bad file", "file too short (pure read count)");
  6390. if (count > left)
  6391. count = (stbi_uc)left;
  6392. if (!stbi__readval(s, packet->channel, value))
  6393. return 0;
  6394. for (i = 0; i < count; ++i, dest += 4)
  6395. stbi__copyval(packet->channel, dest, value);
  6396. left -= count;
  6397. }
  6398. } break;
  6399. case 2: { // Mixed RLE
  6400. int left = width;
  6401. while (left > 0) {
  6402. int count = stbi__get8(s), i;
  6403. if (stbi__at_eof(s))
  6404. return stbi__errpuc(
  6405. "bad file", "file too short (mixed read count)");
  6406. if (count >= 128) { // Repeated
  6407. stbi_uc value[4];
  6408. if (count == 128)
  6409. count = stbi__get16be(s);
  6410. else
  6411. count -= 127;
  6412. if (count > left)
  6413. return stbi__errpuc("bad file", "scanline overrun");
  6414. if (!stbi__readval(s, packet->channel, value))
  6415. return 0;
  6416. for (i = 0; i < count; ++i, dest += 4)
  6417. stbi__copyval(packet->channel, dest, value);
  6418. } else { // Raw
  6419. ++count;
  6420. if (count > left)
  6421. return stbi__errpuc("bad file", "scanline overrun");
  6422. for (i = 0; i < count; ++i, dest += 4)
  6423. if (!stbi__readval(s, packet->channel, dest))
  6424. return 0;
  6425. }
  6426. left -= count;
  6427. }
  6428. break;
  6429. }
  6430. }
  6431. }
  6432. }
  6433. return result;
  6434. }
  6435. static void* stbi__pic_load(
  6436. stbi__context* s, int* px, int* py, int* comp, int req_comp,
  6437. stbi__result_info* ri) {
  6438. stbi_uc* result;
  6439. int i, x, y, internal_comp;
  6440. STBI_NOTUSED(ri);
  6441. if (!comp)
  6442. comp = &internal_comp;
  6443. for (i = 0; i < 92; ++i)
  6444. stbi__get8(s);
  6445. x = stbi__get16be(s);
  6446. y = stbi__get16be(s);
  6447. if (y > STBI_MAX_DIMENSIONS)
  6448. return stbi__errpuc("too large", "Very large image (corrupt?)");
  6449. if (x > STBI_MAX_DIMENSIONS)
  6450. return stbi__errpuc("too large", "Very large image (corrupt?)");
  6451. if (stbi__at_eof(s))
  6452. return stbi__errpuc("bad file", "file too short (pic header)");
  6453. if (!stbi__mad3sizes_valid(x, y, 4, 0))
  6454. return stbi__errpuc("too large", "PIC image too large to decode");
  6455. stbi__get32be(s); // skip `ratio'
  6456. stbi__get16be(s); // skip `fields'
  6457. stbi__get16be(s); // skip `pad'
  6458. // intermediate buffer is RGBA
  6459. result = (stbi_uc*)stbi__malloc_mad3(x, y, 4, 0);
  6460. if (!result)
  6461. return stbi__errpuc("outofmem", "Out of memory");
  6462. memset(result, 0xff, x * y * 4);
  6463. if (!stbi__pic_load_core(s, x, y, comp, result)) {
  6464. STBI_FREE(result);
  6465. result = 0;
  6466. }
  6467. *px = x;
  6468. *py = y;
  6469. if (req_comp == 0)
  6470. req_comp = *comp;
  6471. result = stbi__convert_format(result, 4, req_comp, x, y);
  6472. return result;
  6473. }
  6474. static int stbi__pic_test(stbi__context* s) {
  6475. int r = stbi__pic_test_core(s);
  6476. stbi__rewind(s);
  6477. return r;
  6478. }
  6479. #endif
  6480. // *************************************************************************************************
  6481. // GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb
  6482. #ifndef STBI_NO_GIF
  6483. typedef struct {
  6484. stbi__int16 prefix;
  6485. stbi_uc first;
  6486. stbi_uc suffix;
  6487. } stbi__gif_lzw;
  6488. typedef struct {
  6489. int w, h;
  6490. stbi_uc* out; // output buffer (always 4 components)
  6491. stbi_uc* background; // The current "background" as far as a gif is concerned
  6492. stbi_uc* history;
  6493. int flags, bgindex, ratio, transparent, eflags;
  6494. stbi_uc pal[256][4];
  6495. stbi_uc lpal[256][4];
  6496. stbi__gif_lzw codes[8192];
  6497. stbi_uc* color_table;
  6498. int parse, step;
  6499. int lflags;
  6500. int start_x, start_y;
  6501. int max_x, max_y;
  6502. int cur_x, cur_y;
  6503. int line_size;
  6504. int delay;
  6505. } stbi__gif;
  6506. static int stbi__gif_test_raw(stbi__context* s) {
  6507. int sz;
  6508. if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' ||
  6509. stbi__get8(s) != '8')
  6510. return 0;
  6511. sz = stbi__get8(s);
  6512. if (sz != '9' && sz != '7')
  6513. return 0;
  6514. if (stbi__get8(s) != 'a')
  6515. return 0;
  6516. return 1;
  6517. }
  6518. static int stbi__gif_test(stbi__context* s) {
  6519. int r = stbi__gif_test_raw(s);
  6520. stbi__rewind(s);
  6521. return r;
  6522. }
  6523. static void stbi__gif_parse_colortable(
  6524. stbi__context* s, stbi_uc pal[256][4], int num_entries, int transp) {
  6525. int i;
  6526. for (i = 0; i < num_entries; ++i) {
  6527. pal[i][2] = stbi__get8(s);
  6528. pal[i][1] = stbi__get8(s);
  6529. pal[i][0] = stbi__get8(s);
  6530. pal[i][3] = transp == i ? 0 : 255;
  6531. }
  6532. }
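// Illustrative note (not part of the original file): palette entries are read as
// R,G,B from the stream but stored byte-swapped as {B,G,R,A} in pal[i][0..3];
// stbi__out_gif_code below swaps them back (p[0] = c[2], p[2] = c[0]) so the
// decoded output buffer ends up in the usual RGBA order.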
  6533. static int stbi__gif_header(stbi__context* s, stbi__gif* g, int* comp, int is_info) {
  6534. stbi_uc version;
  6535. if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' ||
  6536. stbi__get8(s) != '8')
  6537. return stbi__err("not GIF", "Corrupt GIF");
  6538. version = stbi__get8(s);
  6539. if (version != '7' && version != '9')
  6540. return stbi__err("not GIF", "Corrupt GIF");
  6541. if (stbi__get8(s) != 'a')
  6542. return stbi__err("not GIF", "Corrupt GIF");
  6543. stbi__g_failure_reason = "";
  6544. g->w = stbi__get16le(s);
  6545. g->h = stbi__get16le(s);
  6546. g->flags = stbi__get8(s);
  6547. g->bgindex = stbi__get8(s);
  6548. g->ratio = stbi__get8(s);
  6549. g->transparent = -1;
  6550. if (g->w > STBI_MAX_DIMENSIONS)
  6551. return stbi__err("too large", "Very large image (corrupt?)");
  6552. if (g->h > STBI_MAX_DIMENSIONS)
  6553. return stbi__err("too large", "Very large image (corrupt?)");
  6554. if (comp != 0)
  6555. *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments
  6556. if (is_info)
  6557. return 1;
  6558. if (g->flags & 0x80)
  6559. stbi__gif_parse_colortable(s, g->pal, 2 << (g->flags & 7), -1);
  6560. return 1;
  6561. }
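// Illustrative note (not part of the original file): the logical screen descriptor
// packs several fields into the single `flags' byte read above. For example,
// flags = 0xF7 (binary 1111 0111) means:
//   bit 7 (0x80)    = 1 -> a global color table follows the header
//   bits 6..4       = 7 -> colour resolution (ignored, as is the sort flag in bit 3)
//   bits 2..0 (& 7) = 7 -> table size field, so 2 << (flags & 7) = 256 entries,
// which is why stbi__gif_parse_colortable is called with 2 << (g->flags & 7).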
  6562. static int stbi__gif_info_raw(stbi__context* s, int* x, int* y, int* comp) {
  6563. stbi__gif* g = (stbi__gif*)stbi__malloc(sizeof(stbi__gif));
  6564. if (!g)
  6565. return stbi__err("outofmem", "Out of memory");
  6566. if (!stbi__gif_header(s, g, comp, 1)) {
  6567. STBI_FREE(g);
  6568. stbi__rewind(s);
  6569. return 0;
  6570. }
  6571. if (x)
  6572. *x = g->w;
  6573. if (y)
  6574. *y = g->h;
  6575. STBI_FREE(g);
  6576. return 1;
  6577. }
  6578. static void stbi__out_gif_code(stbi__gif* g, stbi__uint16 code) {
  6579. stbi_uc *p, *c;
  6580. int idx;
  6581. // recurse to decode the prefixes, since the linked-list is backwards,
  6582. // and working backwards through an interleaved image would be nasty
  6583. if (g->codes[code].prefix >= 0)
  6584. stbi__out_gif_code(g, g->codes[code].prefix);
  6585. if (g->cur_y >= g->max_y)
  6586. return;
  6587. idx = g->cur_x + g->cur_y;
  6588. p = &g->out[idx];
  6589. g->history[idx / 4] = 1;
  6590. c = &g->color_table[g->codes[code].suffix * 4];
  6591. if (c[3] > 128) { // don't render transparent pixels;
  6592. p[0] = c[2];
  6593. p[1] = c[1];
  6594. p[2] = c[0];
  6595. p[3] = c[3];
  6596. }
  6597. g->cur_x += 4;
  6598. if (g->cur_x >= g->max_x) {
  6599. g->cur_x = g->start_x;
  6600. g->cur_y += g->step;
  6601. while (g->cur_y >= g->max_y && g->parse > 0) {
  6602. g->step = (1 << g->parse) * g->line_size;
  6603. g->cur_y = g->start_y + (g->step >> 1);
  6604. --g->parse;
  6605. }
  6606. }
  6607. }
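// Illustrative note (not part of the original file): for an interlaced GIF
// (lflags & 0x40 set), rows are visited in the four standard passes. With
// g->parse starting at 3 and g->step at 8 * line_size, the rows touched are:
//   pass 1: 0, 8, 16, ...   pass 2: 4, 12, 20, ...
//   pass 3: 2, 6, 10, ...   pass 4: 1, 3, 5, ...
// Each time cur_y runs past max_y, the while-loop above recomputes
// step = (1 << parse) * line_size, restarts at start_y + step/2, and decrements
// parse, which produces exactly that 8/8/4/2-row spacing.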
  6608. static stbi_uc* stbi__process_gif_raster(stbi__context* s, stbi__gif* g) {
  6609. stbi_uc lzw_cs;
  6610. stbi__int32 len, init_code;
  6611. stbi__uint32 first;
  6612. stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear;
  6613. stbi__gif_lzw* p;
  6614. lzw_cs = stbi__get8(s);
  6615. if (lzw_cs > 12)
  6616. return NULL;
  6617. clear = 1 << lzw_cs;
  6618. first = 1;
  6619. codesize = lzw_cs + 1;
  6620. codemask = (1 << codesize) - 1;
  6621. bits = 0;
  6622. valid_bits = 0;
  6623. for (init_code = 0; init_code < clear; init_code++) {
  6624. g->codes[init_code].prefix = -1;
  6625. g->codes[init_code].first = (stbi_uc)init_code;
  6626. g->codes[init_code].suffix = (stbi_uc)init_code;
  6627. }
  6628. // support no starting clear code
  6629. avail = clear + 2;
  6630. oldcode = -1;
  6631. len = 0;
  6632. for (;;) {
  6633. if (valid_bits < codesize) {
  6634. if (len == 0) {
  6635. len = stbi__get8(s); // start new block
  6636. if (len == 0)
  6637. return g->out;
  6638. }
  6639. --len;
  6640. bits |= (stbi__int32)stbi__get8(s) << valid_bits;
  6641. valid_bits += 8;
  6642. } else {
  6643. stbi__int32 code = bits & codemask;
  6644. bits >>= codesize;
  6645. valid_bits -= codesize;
  6646. // @OPTIMIZE: is there some way we can accelerate the non-clear path?
  6647. if (code == clear) { // clear code
  6648. codesize = lzw_cs + 1;
  6649. codemask = (1 << codesize) - 1;
  6650. avail = clear + 2;
  6651. oldcode = -1;
  6652. first = 0;
  6653. } else if (code == clear + 1) { // end of stream code
  6654. stbi__skip(s, len);
  6655. while ((len = stbi__get8(s)) > 0)
  6656. stbi__skip(s, len);
  6657. return g->out;
  6658. } else if (code <= avail) {
  6659. if (first) {
  6660. return stbi__errpuc("no clear code", "Corrupt GIF");
  6661. }
  6662. if (oldcode >= 0) {
  6663. p = &g->codes[avail++];
  6664. if (avail > 8192) {
  6665. return stbi__errpuc("too many codes", "Corrupt GIF");
  6666. }
  6667. p->prefix = (stbi__int16)oldcode;
  6668. p->first = g->codes[oldcode].first;
  6669. p->suffix = (code == avail) ? p->first : g->codes[code].first;
  6670. } else if (code == avail)
  6671. return stbi__errpuc("illegal code in raster", "Corrupt GIF");
  6672. stbi__out_gif_code(g, (stbi__uint16)code);
  6673. if ((avail & codemask) == 0 && avail <= 0x0FFF) {
  6674. codesize++;
  6675. codemask = (1 << codesize) - 1;
  6676. }
  6677. oldcode = code;
  6678. } else {
  6679. return stbi__errpuc("illegal code in raster", "Corrupt GIF");
  6680. }
  6681. }
  6682. }
  6683. }
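// Illustrative walk-through (not part of the original file), assuming an 8-bit
// GIF (lzw_cs == 8): clear == 256, the end-of-stream code is 257, and the first
// dynamically built code is 258, so decoding starts with 9-bit codes
// (codesize = lzw_cs + 1). Each time a code is added, (avail & codemask) is
// tested: when avail reaches 512, 1024 and 2048 the code size grows to 10, 11
// and 12 bits; the avail <= 0x0FFF test stops growth at the 12-bit GIF maximum,
// and the separate avail > 8192 test guards the codes[] table. Every emitted
// code is expanded by stbi__out_gif_code, which follows prefix links back to
// the first pixel of the string.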
6684. // this function is designed to support animated gifs, although stb_image doesn't
6685. // support it. two_back is the image from two frames ago, used for a very specific
6686. // disposal format
  6687. static stbi_uc* stbi__gif_load_next(
  6688. stbi__context* s, stbi__gif* g, int* comp, int req_comp, stbi_uc* two_back) {
  6689. int dispose;
  6690. int first_frame;
  6691. int pi;
  6692. int pcount;
  6693. STBI_NOTUSED(req_comp);
  6694. // on first frame, any non-written pixels get the background colour (non-transparent)
  6695. first_frame = 0;
  6696. if (g->out == 0) {
  6697. if (!stbi__gif_header(s, g, comp, 0))
  6698. return 0; // stbi__g_failure_reason set by stbi__gif_header
  6699. if (!stbi__mad3sizes_valid(4, g->w, g->h, 0))
  6700. return stbi__errpuc("too large", "GIF image is too large");
  6701. pcount = g->w * g->h;
  6702. g->out = (stbi_uc*)stbi__malloc(4 * pcount);
  6703. g->background = (stbi_uc*)stbi__malloc(4 * pcount);
  6704. g->history = (stbi_uc*)stbi__malloc(pcount);
  6705. if (!g->out || !g->background || !g->history)
  6706. return stbi__errpuc("outofmem", "Out of memory");
  6707. // image is treated as "transparent" at the start - ie, nothing overwrites the
  6708. // current background; background colour is only used for pixels that are not
6709. // rendered the first frame; after that, "background" color refers to the color that
  6710. // was there the previous frame.
  6711. memset(g->out, 0x00, 4 * pcount);
  6712. memset(g->background, 0x00,
  6713. 4 * pcount); // state of the background (starts transparent)
  6714. memset(g->history, 0x00, pcount); // pixels that were affected previous frame
  6715. first_frame = 1;
  6716. } else {
  6717. // second frame - how do we dispose of the previous one?
  6718. dispose = (g->eflags & 0x1C) >> 2;
  6719. pcount = g->w * g->h;
  6720. if ((dispose == 3) && (two_back == 0)) {
  6721. dispose = 2; // if I don't have an image to revert back to, default to the
  6722. // old background
  6723. }
  6724. if (dispose == 3) { // use previous graphic
  6725. for (pi = 0; pi < pcount; ++pi) {
  6726. if (g->history[pi]) {
  6727. memcpy(&g->out[pi * 4], &two_back[pi * 4], 4);
  6728. }
  6729. }
  6730. } else if (dispose == 2) {
  6731. // restore what was changed last frame to background before that frame;
  6732. for (pi = 0; pi < pcount; ++pi) {
  6733. if (g->history[pi]) {
  6734. memcpy(&g->out[pi * 4], &g->background[pi * 4], 4);
  6735. }
  6736. }
  6737. } else {
6738. // This is a non-disposal case either way, so just
  6739. // leave the pixels as is, and they will become the new background
  6740. // 1: do not dispose
  6741. // 0: not specified.
  6742. }
6743. // background is what out is after the undoing of the previous frame;
  6744. memcpy(g->background, g->out, 4 * g->w * g->h);
  6745. }
  6746. // clear my history;
  6747. memset(g->history, 0x00, g->w * g->h); // pixels that were affected previous frame
  6748. for (;;) {
  6749. int tag = stbi__get8(s);
  6750. switch (tag) {
  6751. case 0x2C: /* Image Descriptor */
  6752. {
  6753. stbi__int32 x, y, w, h;
  6754. stbi_uc* o;
  6755. x = stbi__get16le(s);
  6756. y = stbi__get16le(s);
  6757. w = stbi__get16le(s);
  6758. h = stbi__get16le(s);
  6759. if (((x + w) > (g->w)) || ((y + h) > (g->h)))
  6760. return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");
  6761. g->line_size = g->w * 4;
  6762. g->start_x = x * 4;
  6763. g->start_y = y * g->line_size;
  6764. g->max_x = g->start_x + w * 4;
  6765. g->max_y = g->start_y + h * g->line_size;
  6766. g->cur_x = g->start_x;
  6767. g->cur_y = g->start_y;
  6768. // if the width of the specified rectangle is 0, that means
  6769. // we may not see *any* pixels or the image is malformed;
  6770. // to make sure this is caught, move the current y down to
  6771. // max_y (which is what out_gif_code checks).
  6772. if (w == 0)
  6773. g->cur_y = g->max_y;
  6774. g->lflags = stbi__get8(s);
  6775. if (g->lflags & 0x40) {
  6776. g->step = 8 * g->line_size; // first interlaced spacing
  6777. g->parse = 3;
  6778. } else {
  6779. g->step = g->line_size;
  6780. g->parse = 0;
  6781. }
  6782. if (g->lflags & 0x80) {
  6783. stbi__gif_parse_colortable(
  6784. s, g->lpal, 2 << (g->lflags & 7),
  6785. g->eflags & 0x01 ? g->transparent : -1);
  6786. g->color_table = (stbi_uc*)g->lpal;
  6787. } else if (g->flags & 0x80) {
  6788. g->color_table = (stbi_uc*)g->pal;
  6789. } else
  6790. return stbi__errpuc("missing color table", "Corrupt GIF");
  6791. o = stbi__process_gif_raster(s, g);
  6792. if (!o)
  6793. return NULL;
  6794. // if this was the first frame,
  6795. pcount = g->w * g->h;
  6796. if (first_frame && (g->bgindex > 0)) {
  6797. // if first frame, any pixel not drawn to gets the background color
  6798. for (pi = 0; pi < pcount; ++pi) {
  6799. if (g->history[pi] == 0) {
  6800. g->pal[g->bgindex][3] =
  6801. 255; // just in case it was made transparent,
  6802. // undo that; It will be reset next frame
  6803. // if need be;
  6804. memcpy(&g->out[pi * 4], &g->pal[g->bgindex], 4);
  6805. }
  6806. }
  6807. }
  6808. return o;
  6809. }
  6810. case 0x21: // Comment Extension.
  6811. {
  6812. int len;
  6813. int ext = stbi__get8(s);
  6814. if (ext == 0xF9) { // Graphic Control Extension.
  6815. len = stbi__get8(s);
  6816. if (len == 4) {
  6817. g->eflags = stbi__get8(s);
  6818. g->delay =
  6819. 10 * stbi__get16le(s); // delay - 1/100th of a second,
  6820. // saving as 1/1000ths.
  6821. // unset old transparent
  6822. if (g->transparent >= 0) {
  6823. g->pal[g->transparent][3] = 255;
  6824. }
  6825. if (g->eflags & 0x01) {
  6826. g->transparent = stbi__get8(s);
  6827. if (g->transparent >= 0) {
  6828. g->pal[g->transparent][3] = 0;
  6829. }
  6830. } else {
  6831. // don't need transparent
  6832. stbi__skip(s, 1);
  6833. g->transparent = -1;
  6834. }
  6835. } else {
  6836. stbi__skip(s, len);
  6837. break;
  6838. }
  6839. }
  6840. while ((len = stbi__get8(s)) != 0) {
  6841. stbi__skip(s, len);
  6842. }
  6843. break;
  6844. }
  6845. case 0x3B: // gif stream termination code
  6846. return (stbi_uc*)s; // using '1' causes warning on some compilers
  6847. default:
  6848. return stbi__errpuc("unknown code", "Corrupt GIF");
  6849. }
  6850. }
  6851. }
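// Illustrative note (not part of the original file): the disposal method used at
// the top of stbi__gif_load_next is bits 2..4 of the Graphic Control Extension
// flags, i.e. (eflags & 0x1C) >> 2:
//   0 - not specified: keep the pixels (treated like 1)
//   1 - do not dispose: keep the pixels; they become the next background
//   2 - restore to background: copy g->background over the pixels touched last frame
//   3 - restore to previous: copy two_back over the pixels touched last frame
//       (falls back to 2 when no frame from two steps ago exists yet)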
  6852. static void* stbi__load_gif_main_outofmem(stbi__gif* g, stbi_uc* out, int** delays) {
  6853. STBI_FREE(g->out);
  6854. STBI_FREE(g->history);
  6855. STBI_FREE(g->background);
  6856. if (out)
  6857. STBI_FREE(out);
  6858. if (delays && *delays)
  6859. STBI_FREE(*delays);
  6860. return stbi__errpuc("outofmem", "Out of memory");
  6861. }
  6862. static void* stbi__load_gif_main(
  6863. stbi__context* s, int** delays, int* x, int* y, int* z, int* comp,
  6864. int req_comp) {
  6865. if (stbi__gif_test(s)) {
  6866. int layers = 0;
  6867. stbi_uc* u = 0;
  6868. stbi_uc* out = 0;
  6869. stbi_uc* two_back = 0;
  6870. stbi__gif g;
  6871. int stride;
  6872. int out_size = 0;
  6873. int delays_size = 0;
  6874. STBI_NOTUSED(out_size);
  6875. STBI_NOTUSED(delays_size);
  6876. memset(&g, 0, sizeof(g));
  6877. if (delays) {
  6878. *delays = 0;
  6879. }
  6880. do {
  6881. u = stbi__gif_load_next(s, &g, comp, req_comp, two_back);
  6882. if (u == (stbi_uc*)s)
  6883. u = 0; // end of animated gif marker
  6884. if (u) {
  6885. *x = g.w;
  6886. *y = g.h;
  6887. ++layers;
  6888. stride = g.w * g.h * 4;
  6889. if (out) {
  6890. void* tmp = (stbi_uc*)STBI_REALLOC_SIZED(
  6891. out, out_size, layers * stride);
  6892. if (!tmp)
  6893. return stbi__load_gif_main_outofmem(&g, out, delays);
  6894. else {
  6895. out = (stbi_uc*)tmp;
  6896. out_size = layers * stride;
  6897. }
  6898. if (delays) {
  6899. int* new_delays = (int*)STBI_REALLOC_SIZED(
  6900. *delays, delays_size, sizeof(int) * layers);
  6901. if (!new_delays)
  6902. return stbi__load_gif_main_outofmem(&g, out, delays);
  6903. *delays = new_delays;
  6904. delays_size = layers * sizeof(int);
  6905. }
  6906. } else {
  6907. out = (stbi_uc*)stbi__malloc(layers * stride);
  6908. if (!out)
  6909. return stbi__load_gif_main_outofmem(&g, out, delays);
  6910. out_size = layers * stride;
  6911. if (delays) {
  6912. *delays = (int*)stbi__malloc(layers * sizeof(int));
  6913. if (!*delays)
  6914. return stbi__load_gif_main_outofmem(&g, out, delays);
  6915. delays_size = layers * sizeof(int);
  6916. }
  6917. }
  6918. memcpy(out + ((layers - 1) * stride), u, stride);
  6919. if (layers >= 2) {
6920. two_back = out + (layers - 2) * stride; // frame two behind the one about to be decoded
  6921. }
  6922. if (delays) {
  6923. (*delays)[layers - 1U] = g.delay;
  6924. }
  6925. }
  6926. } while (u != 0);
  6927. // free temp buffer;
  6928. STBI_FREE(g.out);
  6929. STBI_FREE(g.history);
  6930. STBI_FREE(g.background);
  6931. // do the final conversion after loading everything;
  6932. if (req_comp && req_comp != 4)
  6933. out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h);
  6934. *z = layers;
  6935. return out;
  6936. } else {
6937. return stbi__errpuc("not GIF", "Image was not a GIF type.");
  6938. }
  6939. }
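/* Illustrative usage sketch (not part of the original file): the multi-frame path
   above is reached through the public stbi_load_gif_from_memory() API. All frames
   come back concatenated in one buffer with per-frame delays in milliseconds;
   with req_comp == 4, frame i starts at frames + i * w * h * 4.

      #include <stdio.h>
      // #define STB_IMAGE_IMPLEMENTATION   // in exactly one translation unit
      // #include "stb_image.h"

      static void dump_gif_frames(const unsigned char *data, int len) {
         int w, h, nframes, comp, i;
         int *delays = NULL;
         unsigned char *frames = stbi_load_gif_from_memory(
            data, len, &delays, &w, &h, &nframes, &comp, 4);
         if (!frames) {
            printf("decode failed: %s\n", stbi_failure_reason());
            return;
         }
         for (i = 0; i < nframes; ++i)
            printf("frame %d: %dx%d, shown for %d ms\n", i, w, h, delays[i]);
         stbi_image_free(frames);  // concatenated frame buffer
         stbi_image_free(delays);  // delay array uses the same allocator
      }
*/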
  6940. static void* stbi__gif_load(
  6941. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  6942. stbi__result_info* ri) {
  6943. stbi_uc* u = 0;
  6944. stbi__gif g;
  6945. memset(&g, 0, sizeof(g));
  6946. STBI_NOTUSED(ri);
  6947. u = stbi__gif_load_next(s, &g, comp, req_comp, 0);
  6948. if (u == (stbi_uc*)s)
  6949. u = 0; // end of animated gif marker
  6950. if (u) {
  6951. *x = g.w;
  6952. *y = g.h;
  6953. // moved conversion to after successful load so that the same
  6954. // can be done for multiple frames.
  6955. if (req_comp && req_comp != 4)
  6956. u = stbi__convert_format(u, 4, req_comp, g.w, g.h);
  6957. } else if (g.out) {
  6958. // if there was an error and we allocated an image buffer, free it!
  6959. STBI_FREE(g.out);
  6960. }
  6961. // free buffers needed for multiple frame loading;
  6962. STBI_FREE(g.history);
  6963. STBI_FREE(g.background);
  6964. return u;
  6965. }
  6966. static int stbi__gif_info(stbi__context* s, int* x, int* y, int* comp) {
  6967. return stbi__gif_info_raw(s, x, y, comp);
  6968. }
  6969. #endif
  6970. // *************************************************************************************************
  6971. // Radiance RGBE HDR loader
  6972. // originally by Nicolas Schulz
  6973. #ifndef STBI_NO_HDR
  6974. static int stbi__hdr_test_core(stbi__context* s, const char* signature) {
  6975. int i;
  6976. for (i = 0; signature[i]; ++i)
  6977. if (stbi__get8(s) != signature[i])
  6978. return 0;
  6979. stbi__rewind(s);
  6980. return 1;
  6981. }
  6982. static int stbi__hdr_test(stbi__context* s) {
  6983. int r = stbi__hdr_test_core(s, "#?RADIANCE\n");
  6984. stbi__rewind(s);
  6985. if (!r) {
  6986. r = stbi__hdr_test_core(s, "#?RGBE\n");
  6987. stbi__rewind(s);
  6988. }
  6989. return r;
  6990. }
  6991. #define STBI__HDR_BUFLEN 1024
  6992. static char* stbi__hdr_gettoken(stbi__context* z, char* buffer) {
  6993. int len = 0;
  6994. char c = '\0';
  6995. c = (char)stbi__get8(z);
  6996. while (!stbi__at_eof(z) && c != '\n') {
  6997. buffer[len++] = c;
  6998. if (len == STBI__HDR_BUFLEN - 1) {
  6999. // flush to end of line
  7000. while (!stbi__at_eof(z) && stbi__get8(z) != '\n')
  7001. ;
  7002. break;
  7003. }
  7004. c = (char)stbi__get8(z);
  7005. }
  7006. buffer[len] = 0;
  7007. return buffer;
  7008. }
  7009. static void stbi__hdr_convert(float* output, stbi_uc* input, int req_comp) {
  7010. if (input[3] != 0) {
  7011. float f1;
  7012. // Exponent
  7013. f1 = (float)ldexp(1.0f, input[3] - (int)(128 + 8));
  7014. if (req_comp <= 2)
  7015. output[0] = (input[0] + input[1] + input[2]) * f1 / 3;
  7016. else {
  7017. output[0] = input[0] * f1;
  7018. output[1] = input[1] * f1;
  7019. output[2] = input[2] * f1;
  7020. }
  7021. if (req_comp == 2)
  7022. output[1] = 1;
  7023. if (req_comp == 4)
  7024. output[3] = 1;
  7025. } else {
  7026. switch (req_comp) {
  7027. case 4:
  7028. output[3] = 1; /* fallthrough */
  7029. case 3:
  7030. output[0] = output[1] = output[2] = 0;
  7031. break;
  7032. case 2:
  7033. output[1] = 1; /* fallthrough */
  7034. case 1:
  7035. output[0] = 0;
  7036. break;
  7037. }
  7038. }
  7039. }
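// Worked example (not part of the original file): an RGBE texel shares one
// exponent byte across the three mantissas, so with req_comp == 3,
// input = {128, 64, 32, 136} gives f1 = ldexp(1, 136 - 128 - 8) = 1.0 and
// output = {128.0, 64.0, 32.0}; the same mantissas with exponent byte 128 give
// f1 = 2^-8, i.e. {0.5, 0.25, 0.125}. An exponent byte of 0 is the special
// all-zero case handled by the else branch above.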
  7040. static float* stbi__hdr_load(
  7041. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  7042. stbi__result_info* ri) {
  7043. char buffer[STBI__HDR_BUFLEN];
  7044. char* token;
  7045. int valid = 0;
  7046. int width, height;
  7047. stbi_uc* scanline;
  7048. float* hdr_data;
  7049. int len;
  7050. unsigned char count, value;
  7051. int i, j, k, c1, c2, z;
  7052. const char* headerToken;
  7053. STBI_NOTUSED(ri);
  7054. // Check identifier
  7055. headerToken = stbi__hdr_gettoken(s, buffer);
  7056. if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0)
  7057. return stbi__errpf("not HDR", "Corrupt HDR image");
  7058. // Parse header
  7059. for (;;) {
  7060. token = stbi__hdr_gettoken(s, buffer);
  7061. if (token[0] == 0)
  7062. break;
  7063. if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0)
  7064. valid = 1;
  7065. }
  7066. if (!valid)
  7067. return stbi__errpf("unsupported format", "Unsupported HDR format");
  7068. // Parse width and height
  7069. // can't use sscanf() if we're not using stdio!
  7070. token = stbi__hdr_gettoken(s, buffer);
  7071. if (strncmp(token, "-Y ", 3))
  7072. return stbi__errpf("unsupported data layout", "Unsupported HDR format");
  7073. token += 3;
  7074. height = (int)strtol(token, &token, 10);
  7075. while (*token == ' ')
  7076. ++token;
  7077. if (strncmp(token, "+X ", 3))
  7078. return stbi__errpf("unsupported data layout", "Unsupported HDR format");
  7079. token += 3;
  7080. width = (int)strtol(token, NULL, 10);
  7081. if (height > STBI_MAX_DIMENSIONS)
  7082. return stbi__errpf("too large", "Very large image (corrupt?)");
  7083. if (width > STBI_MAX_DIMENSIONS)
  7084. return stbi__errpf("too large", "Very large image (corrupt?)");
  7085. *x = width;
  7086. *y = height;
  7087. if (comp)
  7088. *comp = 3;
  7089. if (req_comp == 0)
  7090. req_comp = 3;
  7091. if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0))
  7092. return stbi__errpf("too large", "HDR image is too large");
  7093. // Read data
  7094. hdr_data = (float*)stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0);
  7095. if (!hdr_data)
  7096. return stbi__errpf("outofmem", "Out of memory");
  7097. // Load image data
7098. // image data is stored as some number of scan lines
  7099. if (width < 8 || width >= 32768) {
  7100. // Read flat data
  7101. for (j = 0; j < height; ++j) {
  7102. for (i = 0; i < width; ++i) {
  7103. stbi_uc rgbe[4];
  7104. main_decode_loop:
  7105. stbi__getn(s, rgbe, 4);
  7106. stbi__hdr_convert(
  7107. hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp);
  7108. }
  7109. }
  7110. } else {
  7111. // Read RLE-encoded data
  7112. scanline = NULL;
  7113. for (j = 0; j < height; ++j) {
  7114. c1 = stbi__get8(s);
  7115. c2 = stbi__get8(s);
  7116. len = stbi__get8(s);
  7117. if (c1 != 2 || c2 != 2 || (len & 0x80)) {
  7118. // not run-length encoded, so we have to actually use THIS data as a
  7119. // decoded pixel (note this can't be a valid pixel--one of RGB must be
  7120. // >= 128)
  7121. stbi_uc rgbe[4];
  7122. rgbe[0] = (stbi_uc)c1;
  7123. rgbe[1] = (stbi_uc)c2;
  7124. rgbe[2] = (stbi_uc)len;
  7125. rgbe[3] = (stbi_uc)stbi__get8(s);
  7126. stbi__hdr_convert(hdr_data, rgbe, req_comp);
  7127. i = 1;
  7128. j = 0;
  7129. STBI_FREE(scanline);
  7130. goto main_decode_loop; // yes, this makes no sense
  7131. }
  7132. len <<= 8;
  7133. len |= stbi__get8(s);
  7134. if (len != width) {
  7135. STBI_FREE(hdr_data);
  7136. STBI_FREE(scanline);
  7137. return stbi__errpf("invalid decoded scanline length", "corrupt HDR");
  7138. }
  7139. if (scanline == NULL) {
  7140. scanline = (stbi_uc*)stbi__malloc_mad2(width, 4, 0);
  7141. if (!scanline) {
  7142. STBI_FREE(hdr_data);
  7143. return stbi__errpf("outofmem", "Out of memory");
  7144. }
  7145. }
  7146. for (k = 0; k < 4; ++k) {
  7147. int nleft;
  7148. i = 0;
  7149. while ((nleft = width - i) > 0) {
  7150. count = stbi__get8(s);
  7151. if (count > 128) {
  7152. // Run
  7153. value = stbi__get8(s);
  7154. count -= 128;
  7155. if (count > nleft) {
  7156. STBI_FREE(hdr_data);
  7157. STBI_FREE(scanline);
  7158. return stbi__errpf("corrupt", "bad RLE data in HDR");
  7159. }
  7160. for (z = 0; z < count; ++z)
  7161. scanline[i++ * 4 + k] = value;
  7162. } else {
  7163. // Dump
  7164. if (count > nleft) {
  7165. STBI_FREE(hdr_data);
  7166. STBI_FREE(scanline);
  7167. return stbi__errpf("corrupt", "bad RLE data in HDR");
  7168. }
  7169. for (z = 0; z < count; ++z)
  7170. scanline[i++ * 4 + k] = stbi__get8(s);
  7171. }
  7172. }
  7173. }
  7174. for (i = 0; i < width; ++i)
  7175. stbi__hdr_convert(
  7176. hdr_data + (j * width + i) * req_comp, scanline + i * 4,
  7177. req_comp);
  7178. }
  7179. if (scanline)
  7180. STBI_FREE(scanline);
  7181. }
  7182. return hdr_data;
  7183. }
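/* Illustrative usage sketch (not part of the original file): Radiance .hdr images
   are normally consumed through the float API, which routes into stbi__hdr_load
   above (assuming STBI_NO_LINEAR is not defined):

      int w, h, n;
      float *pixels = stbi_loadf("probe.hdr", &w, &h, &n, 3);  // hypothetical file name
      if (pixels) {
         // pixels[(y * w + x) * 3 + c] is linear-light float data, not 0..255
         stbi_image_free(pixels);
      }

   Loading an .hdr file through the 8-bit stbi_load() also works; the float data is
   then mapped to bytes using the scale/gamma set by stbi_hdr_to_ldr_scale() and
   stbi_hdr_to_ldr_gamma().
*/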
  7184. static int stbi__hdr_info(stbi__context* s, int* x, int* y, int* comp) {
  7185. char buffer[STBI__HDR_BUFLEN];
  7186. char* token;
  7187. int valid = 0;
  7188. int dummy;
  7189. if (!x)
  7190. x = &dummy;
  7191. if (!y)
  7192. y = &dummy;
  7193. if (!comp)
  7194. comp = &dummy;
  7195. if (stbi__hdr_test(s) == 0) {
  7196. stbi__rewind(s);
  7197. return 0;
  7198. }
  7199. for (;;) {
  7200. token = stbi__hdr_gettoken(s, buffer);
  7201. if (token[0] == 0)
  7202. break;
  7203. if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0)
  7204. valid = 1;
  7205. }
  7206. if (!valid) {
  7207. stbi__rewind(s);
  7208. return 0;
  7209. }
  7210. token = stbi__hdr_gettoken(s, buffer);
  7211. if (strncmp(token, "-Y ", 3)) {
  7212. stbi__rewind(s);
  7213. return 0;
  7214. }
  7215. token += 3;
  7216. *y = (int)strtol(token, &token, 10);
  7217. while (*token == ' ')
  7218. ++token;
  7219. if (strncmp(token, "+X ", 3)) {
  7220. stbi__rewind(s);
  7221. return 0;
  7222. }
  7223. token += 3;
  7224. *x = (int)strtol(token, NULL, 10);
  7225. *comp = 3;
  7226. return 1;
  7227. }
  7228. #endif // STBI_NO_HDR
  7229. #ifndef STBI_NO_BMP
  7230. static int stbi__bmp_info(stbi__context* s, int* x, int* y, int* comp) {
  7231. void* p;
  7232. stbi__bmp_data info;
  7233. info.all_a = 255;
  7234. p = stbi__bmp_parse_header(s, &info);
  7235. if (p == NULL) {
  7236. stbi__rewind(s);
  7237. return 0;
  7238. }
  7239. if (x)
  7240. *x = s->img_x;
  7241. if (y)
  7242. *y = s->img_y;
  7243. if (comp) {
  7244. if (info.bpp == 24 && info.ma == 0xff000000)
  7245. *comp = 3;
  7246. else
  7247. *comp = info.ma ? 4 : 3;
  7248. }
  7249. return 1;
  7250. }
  7251. #endif
  7252. #ifndef STBI_NO_PSD
  7253. static int stbi__psd_info(stbi__context* s, int* x, int* y, int* comp) {
  7254. int channelCount, dummy, depth;
  7255. if (!x)
  7256. x = &dummy;
  7257. if (!y)
  7258. y = &dummy;
  7259. if (!comp)
  7260. comp = &dummy;
  7261. if (stbi__get32be(s) != 0x38425053) {
  7262. stbi__rewind(s);
  7263. return 0;
  7264. }
  7265. if (stbi__get16be(s) != 1) {
  7266. stbi__rewind(s);
  7267. return 0;
  7268. }
  7269. stbi__skip(s, 6);
  7270. channelCount = stbi__get16be(s);
  7271. if (channelCount < 0 || channelCount > 16) {
  7272. stbi__rewind(s);
  7273. return 0;
  7274. }
  7275. *y = stbi__get32be(s);
  7276. *x = stbi__get32be(s);
  7277. depth = stbi__get16be(s);
  7278. if (depth != 8 && depth != 16) {
  7279. stbi__rewind(s);
  7280. return 0;
  7281. }
  7282. if (stbi__get16be(s) != 3) {
  7283. stbi__rewind(s);
  7284. return 0;
  7285. }
  7286. *comp = 4;
  7287. return 1;
  7288. }
  7289. static int stbi__psd_is16(stbi__context* s) {
  7290. int channelCount, depth;
  7291. if (stbi__get32be(s) != 0x38425053) {
  7292. stbi__rewind(s);
  7293. return 0;
  7294. }
  7295. if (stbi__get16be(s) != 1) {
  7296. stbi__rewind(s);
  7297. return 0;
  7298. }
  7299. stbi__skip(s, 6);
  7300. channelCount = stbi__get16be(s);
  7301. if (channelCount < 0 || channelCount > 16) {
  7302. stbi__rewind(s);
  7303. return 0;
  7304. }
  7305. STBI_NOTUSED(stbi__get32be(s));
  7306. STBI_NOTUSED(stbi__get32be(s));
  7307. depth = stbi__get16be(s);
  7308. if (depth != 16) {
  7309. stbi__rewind(s);
  7310. return 0;
  7311. }
  7312. return 1;
  7313. }
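// Illustrative note (not part of the original file): both PSD probes above walk the
// fixed 26-byte Photoshop header. For a 640x480, 16-bit, RGB file the fields read
// in order are:
//   "8BPS" (0x38425053)   signature
//   0x0001                version (must be 1)
//   6 reserved bytes      skipped
//   0x0003                channel count (0..16 accepted)
//   0x000001E0            height = 480
//   0x00000280            width  = 640
//   0x0010                bit depth (8 or 16)
//   0x0003                color mode (3 = RGB, the only mode supported)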
  7314. #endif
  7315. #ifndef STBI_NO_PIC
  7316. static int stbi__pic_info(stbi__context* s, int* x, int* y, int* comp) {
  7317. int act_comp = 0, num_packets = 0, chained, dummy;
  7318. stbi__pic_packet packets[10];
  7319. if (!x)
  7320. x = &dummy;
  7321. if (!y)
  7322. y = &dummy;
  7323. if (!comp)
  7324. comp = &dummy;
  7325. if (!stbi__pic_is4(s, "\x53\x80\xF6\x34")) {
  7326. stbi__rewind(s);
  7327. return 0;
  7328. }
  7329. stbi__skip(s, 88);
  7330. *x = stbi__get16be(s);
  7331. *y = stbi__get16be(s);
  7332. if (stbi__at_eof(s)) {
  7333. stbi__rewind(s);
  7334. return 0;
  7335. }
  7336. if ((*x) != 0 && (1 << 28) / (*x) < (*y)) {
  7337. stbi__rewind(s);
  7338. return 0;
  7339. }
  7340. stbi__skip(s, 8);
  7341. do {
  7342. stbi__pic_packet* packet;
  7343. if (num_packets == sizeof(packets) / sizeof(packets[0]))
  7344. return 0;
  7345. packet = &packets[num_packets++];
  7346. chained = stbi__get8(s);
  7347. packet->size = stbi__get8(s);
  7348. packet->type = stbi__get8(s);
  7349. packet->channel = stbi__get8(s);
  7350. act_comp |= packet->channel;
  7351. if (stbi__at_eof(s)) {
  7352. stbi__rewind(s);
  7353. return 0;
  7354. }
  7355. if (packet->size != 8) {
  7356. stbi__rewind(s);
  7357. return 0;
  7358. }
  7359. } while (chained);
  7360. *comp = (act_comp & 0x10 ? 4 : 3);
  7361. return 1;
  7362. }
  7363. #endif
  7364. // *************************************************************************************************
  7365. // Portable Gray Map and Portable Pixel Map loader
  7366. // by Ken Miller
  7367. //
  7368. // PGM: http://netpbm.sourceforge.net/doc/pgm.html
  7369. // PPM: http://netpbm.sourceforge.net/doc/ppm.html
  7370. //
  7371. // Known limitations:
7372. //    Comments are only recognized between header fields (handled in stbi__pnm_skip_whitespace)
7373. //    Does not support ASCII image data (formats P2 and P3)
  7374. #ifndef STBI_NO_PNM
  7375. static int stbi__pnm_test(stbi__context* s) {
  7376. char p, t;
  7377. p = (char)stbi__get8(s);
  7378. t = (char)stbi__get8(s);
  7379. if (p != 'P' || (t != '5' && t != '6')) {
  7380. stbi__rewind(s);
  7381. return 0;
  7382. }
  7383. return 1;
  7384. }
  7385. static void* stbi__pnm_load(
  7386. stbi__context* s, int* x, int* y, int* comp, int req_comp,
  7387. stbi__result_info* ri) {
  7388. stbi_uc* out;
  7389. STBI_NOTUSED(ri);
  7390. ri->bits_per_channel =
  7391. stbi__pnm_info(s, (int*)&s->img_x, (int*)&s->img_y, (int*)&s->img_n);
  7392. if (ri->bits_per_channel == 0)
  7393. return 0;
  7394. if (s->img_y > STBI_MAX_DIMENSIONS)
  7395. return stbi__errpuc("too large", "Very large image (corrupt?)");
  7396. if (s->img_x > STBI_MAX_DIMENSIONS)
  7397. return stbi__errpuc("too large", "Very large image (corrupt?)");
  7398. *x = s->img_x;
  7399. *y = s->img_y;
  7400. if (comp)
  7401. *comp = s->img_n;
  7402. if (!stbi__mad4sizes_valid(
  7403. s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0))
  7404. return stbi__errpuc("too large", "PNM too large");
  7405. out = (stbi_uc*)stbi__malloc_mad4(
  7406. s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0);
  7407. if (!out)
  7408. return stbi__errpuc("outofmem", "Out of memory");
7409. if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { STBI_FREE(out); return stbi__errpuc("bad PNM", "PNM file truncated"); }
  7410. if (req_comp && req_comp != s->img_n) {
  7411. out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y);
  7412. if (out == NULL)
  7413. return out; // stbi__convert_format frees input on failure
  7414. }
  7415. return out;
  7416. }
  7417. static int stbi__pnm_isspace(char c) {
  7418. return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r';
  7419. }
  7420. static void stbi__pnm_skip_whitespace(stbi__context* s, char* c) {
  7421. for (;;) {
  7422. while (!stbi__at_eof(s) && stbi__pnm_isspace(*c))
  7423. *c = (char)stbi__get8(s);
  7424. if (stbi__at_eof(s) || *c != '#')
  7425. break;
  7426. while (!stbi__at_eof(s) && *c != '\n' && *c != '\r')
  7427. *c = (char)stbi__get8(s);
  7428. }
  7429. }
  7430. static int stbi__pnm_isdigit(char c) {
  7431. return c >= '0' && c <= '9';
  7432. }
  7433. static int stbi__pnm_getinteger(stbi__context* s, char* c) {
  7434. int value = 0;
  7435. while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) {
  7436. value = value * 10 + (*c - '0');
  7437. *c = (char)stbi__get8(s);
  7438. }
  7439. return value;
  7440. }
  7441. static int stbi__pnm_info(stbi__context* s, int* x, int* y, int* comp) {
  7442. int maxv, dummy;
  7443. char c, p, t;
  7444. if (!x)
  7445. x = &dummy;
  7446. if (!y)
  7447. y = &dummy;
  7448. if (!comp)
  7449. comp = &dummy;
  7450. stbi__rewind(s);
  7451. // Get identifier
  7452. p = (char)stbi__get8(s);
  7453. t = (char)stbi__get8(s);
  7454. if (p != 'P' || (t != '5' && t != '6')) {
  7455. stbi__rewind(s);
  7456. return 0;
  7457. }
  7458. *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm
  7459. c = (char)stbi__get8(s);
  7460. stbi__pnm_skip_whitespace(s, &c);
  7461. *x = stbi__pnm_getinteger(s, &c); // read width
  7462. stbi__pnm_skip_whitespace(s, &c);
  7463. *y = stbi__pnm_getinteger(s, &c); // read height
  7464. stbi__pnm_skip_whitespace(s, &c);
  7465. maxv = stbi__pnm_getinteger(s, &c); // read max value
  7466. if (maxv > 65535)
  7467. return stbi__err(
  7468. "max value > 65535", "PPM image supports only 8-bit and 16-bit images");
  7469. else if (maxv > 255)
  7470. return 16;
  7471. else
  7472. return 8;
  7473. }
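// Worked example (not part of the original file): for a binary PGM beginning
// "P5\n# a comment\n640 480\n255\n", the parser above yields *comp = 1 (a 'P6'
// would give 3), *x = 640, *y = 480, and returns 8 because maxval 255 fits in a
// byte; a maxval above 255 (up to 65535) makes it return 16, in which case the
// file stores two bytes per sample (most significant byte first, per the PGM spec).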
  7474. static int stbi__pnm_is16(stbi__context* s) {
  7475. if (stbi__pnm_info(s, NULL, NULL, NULL) == 16)
  7476. return 1;
  7477. return 0;
  7478. }
  7479. #endif
  7480. static int stbi__info_main(stbi__context* s, int* x, int* y, int* comp) {
  7481. #ifndef STBI_NO_JPEG
  7482. if (stbi__jpeg_info(s, x, y, comp))
  7483. return 1;
  7484. #endif
  7485. #ifndef STBI_NO_PNG
  7486. if (stbi__png_info(s, x, y, comp))
  7487. return 1;
  7488. #endif
  7489. #ifndef STBI_NO_GIF
  7490. if (stbi__gif_info(s, x, y, comp))
  7491. return 1;
  7492. #endif
  7493. #ifndef STBI_NO_BMP
  7494. if (stbi__bmp_info(s, x, y, comp))
  7495. return 1;
  7496. #endif
  7497. #ifndef STBI_NO_PSD
  7498. if (stbi__psd_info(s, x, y, comp))
  7499. return 1;
  7500. #endif
  7501. #ifndef STBI_NO_PIC
  7502. if (stbi__pic_info(s, x, y, comp))
  7503. return 1;
  7504. #endif
  7505. #ifndef STBI_NO_PNM
  7506. if (stbi__pnm_info(s, x, y, comp))
  7507. return 1;
  7508. #endif
  7509. #ifndef STBI_NO_HDR
  7510. if (stbi__hdr_info(s, x, y, comp))
  7511. return 1;
  7512. #endif
  7513. // test tga last because it's a crappy test!
  7514. #ifndef STBI_NO_TGA
  7515. if (stbi__tga_info(s, x, y, comp))
  7516. return 1;
  7517. #endif
  7518. return stbi__err("unknown image type", "Image not of any known type, or corrupt");
  7519. }
  7520. static int stbi__is_16_main(stbi__context* s) {
  7521. #ifndef STBI_NO_PNG
  7522. if (stbi__png_is16(s))
  7523. return 1;
  7524. #endif
  7525. #ifndef STBI_NO_PSD
  7526. if (stbi__psd_is16(s))
  7527. return 1;
  7528. #endif
  7529. #ifndef STBI_NO_PNM
  7530. if (stbi__pnm_is16(s))
  7531. return 1;
  7532. #endif
  7533. return 0;
  7534. }
  7535. #ifndef STBI_NO_STDIO
  7536. STBIDEF int stbi_info(char const* filename, int* x, int* y, int* comp) {
  7537. FILE* f = stbi__fopen(filename, "rb");
  7538. int result;
  7539. if (!f)
  7540. return stbi__err("can't fopen", "Unable to open file");
  7541. result = stbi_info_from_file(f, x, y, comp);
  7542. fclose(f);
  7543. return result;
  7544. }
  7545. STBIDEF int stbi_info_from_file(FILE* f, int* x, int* y, int* comp) {
  7546. int r;
  7547. stbi__context s;
  7548. long pos = ftell(f);
  7549. stbi__start_file(&s, f);
  7550. r = stbi__info_main(&s, x, y, comp);
  7551. fseek(f, pos, SEEK_SET);
  7552. return r;
  7553. }
  7554. STBIDEF int stbi_is_16_bit(char const* filename) {
  7555. FILE* f = stbi__fopen(filename, "rb");
  7556. int result;
  7557. if (!f)
  7558. return stbi__err("can't fopen", "Unable to open file");
  7559. result = stbi_is_16_bit_from_file(f);
  7560. fclose(f);
  7561. return result;
  7562. }
  7563. STBIDEF int stbi_is_16_bit_from_file(FILE* f) {
  7564. int r;
  7565. stbi__context s;
  7566. long pos = ftell(f);
  7567. stbi__start_file(&s, f);
  7568. r = stbi__is_16_main(&s);
  7569. fseek(f, pos, SEEK_SET);
  7570. return r;
  7571. }
  7572. #endif // !STBI_NO_STDIO
  7573. STBIDEF int stbi_info_from_memory(
  7574. stbi_uc const* buffer, int len, int* x, int* y, int* comp) {
  7575. stbi__context s;
  7576. stbi__start_mem(&s, buffer, len);
  7577. return stbi__info_main(&s, x, y, comp);
  7578. }
  7579. STBIDEF int stbi_info_from_callbacks(
  7580. stbi_io_callbacks const* c, void* user, int* x, int* y, int* comp) {
  7581. stbi__context s;
  7582. stbi__start_callbacks(&s, (stbi_io_callbacks*)c, user);
  7583. return stbi__info_main(&s, x, y, comp);
  7584. }
  7585. STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const* buffer, int len) {
  7586. stbi__context s;
  7587. stbi__start_mem(&s, buffer, len);
  7588. return stbi__is_16_main(&s);
  7589. }
  7590. STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const* c, void* user) {
  7591. stbi__context s;
  7592. stbi__start_callbacks(&s, (stbi_io_callbacks*)c, user);
  7593. return stbi__is_16_main(&s);
  7594. }
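/* Illustrative usage sketch (not part of the original file): the info / is-16-bit
   entry points above report dimensions and channel count without decoding pixels:

      int w, h, comp;
      if (stbi_info("photo.png", &w, &h, &comp))           // hypothetical file name
         printf("%dx%d, %d channel(s), %s\n", w, h, comp,
                stbi_is_16_bit("photo.png") ? "16-bit" : "8-bit");
      else
         printf("not a recognized image: %s\n", stbi_failure_reason());

   The _from_memory and _from_callbacks variants behave the same way; the stdio
   variants above restore the file position before returning, so probing a FILE*
   does not disturb a subsequent full load.
*/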
  7595. #endif // STB_IMAGE_IMPLEMENTATION
  7596. /*
  7597. revision history:
2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs
2.19 (2018-02-11) fix warning
2.18 (2018-01-30) fix warnings
2.17 (2018-01-29) change stbi__shiftsigned to avoid clang -O2 bug;
                  1-bit BMP; *_is_16_bit api; avoid warnings
2.16 (2017-07-23) all functions have 16-bit variants; STBI_NO_STDIO works again;
                  compilation fixes; fix rounding in unpremultiply; optimize vertical flip;
                  disable raw_len validation; documentation fixes
2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode;
                  warning fixes; disable run-time SSE detection on gcc;
                  uniform handling of optional "return" values;
                  thread-safe initialization of zlib tables
2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
2.13 (2016-11-29) add 16-bit API, only supported for PNG right now
2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
2.11 (2016-04-02) allocate large structures on the stack;
                  remove white matting for transparent PSD;
                  fix reported channel count for PNG & BMP;
                  re-enable SSE2 in non-gcc 64-bit;
                  support RGB-formatted JPEG; read 16-bit PNGs (only as 8-bit)
2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED
2.09 (2016-01-16) allow comments in PNM files; 16-bit-per-pixel TGA (not bit-per-component);
                  info() for TGA could break due to .hdr handling;
                  info() for BMP shares code instead of sloppy parse;
                  can use STBI_REALLOC_SIZED if allocator doesn't support realloc;
                  code cleanup
2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA
2.07 (2015-09-13) fix compiler warnings; partial animated GIF support;
                  limited 16-bpc PSD support; #ifdef unused functions;
                  bug with < 92 byte PIC,PNM,HDR,TGA
2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value
2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning
2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit
2.03 (2015-04-12) extra corruption checking (mmozeiko);
                  stbi_set_flip_vertically_on_load (nguillemot);
                  fix NEON support; fix mingw support
2.02 (2015-01-19) fix incorrect assert, fix warning
2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2
2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG
2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg);
                  progressive JPEG (stb); PGM/PPM support (Ken Miller);
                  STBI_MALLOC, STBI_REALLOC, STBI_FREE;
                  GIF bugfix -- seemingly never worked; STBI_NO_*, STBI_ONLY_*
1.48 (2014-12-14) fix incorrectly-named assert()
1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb);
                  optimize PNG (ryg);
                  fix bug in interlaced PNG with user-specified channel count (stb)
1.46 (2014-08-26) fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG
1.45 (2014-08-16) fix MSVC-ARM internal compiler error by wrapping malloc
1.44 (2014-08-07) various warning fixes from Ronny Chevalier
1.43 (2014-07-15) fix MSVC-only compiler problem in code changed in 1.42
1.42 (2014-07-09) don't define _CRT_SECURE_NO_WARNINGS (affects user code);
                  fixes to stbi__cleanup_jpeg path;
                  added STBI_ASSERT to avoid requiring assert.h
1.41 (2014-06-25) fix search&replace from 1.36 that messed up comments/error messages
1.40 (2014-06-22) fix gcc struct-initialization warning
1.39 (2014-06-15) fix to TGA optimization when req_comp != number of components in TGA;
                  fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite);
                  add support for BMP version 5 (more ignored fields)
1.38 (2014-06-06) suppress MSVC warnings on integer casts truncating values;
                  fix accidental rename of 'skip' field of I/O
1.37 (2014-06-04) remove duplicate typedef
1.36 (2014-06-03) convert to header file single-file library;
                  if de-iphone isn't set, load iphone images color-swapped instead of returning NULL
1.35 (2014-05-27) various warnings; fix broken STBI_SIMD path;
                  fix bug where stbi_load_from_file no longer left file pointer in correct place;
                  fix broken non-easy path for 32-bit BMP (possibly never used);
                  TGA optimization by Arseny Kapoulkine
1.34 (unknown)    use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case
1.33 (2011-07-14) make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements
1.32 (2011-07-13) support for "info" function for all supported filetypes (SpartanJ)
1.31 (2011-06-20) a few more leak fixes, bug in PNG handling (SpartanJ)
1.30 (2011-06-11) added ability to load files via callbacks to accommodate custom input streams (Ben Wenger);
                  removed deprecated format-specific test/load functions;
                  removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway;
                  error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha);
                  fix inefficiency in decoding 32-bit BMP (David Woo)
1.29 (2010-08-16) various warning fixes from Aurelien Pocheville
1.28 (2010-08-01) fix bug in GIF palette transparency (SpartanJ)
1.27 (2010-08-01) cast-to-stbi_uc to fix warnings
1.26 (2010-07-24) fix bug in file buffering for PNG reported by SpartanJ
1.25 (2010-07-17) refix trans_data warning (Won Chun)
1.24 (2010-07-12) perf improvements reading from files on platforms with lock-heavy fgetc();
                  minor perf improvements for jpeg;
                  deprecated type-specific functions so we'll get feedback if they're needed;
                  attempt to fix trans_data warning (Won Chun)
1.23              fixed bug in iPhone support
1.22 (2010-07-10) removed image *writing* support;
                  stbi_info support from Jetro Lauha;
                  GIF support from Jean-Marc Lienher;
                  iPhone PNG-extensions from James Brown;
                  warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez Žemva)
1.21              fix use of 'stbi_uc' in header (reported by jon blow)
1.20              added support for Softimage PIC, by Tom Seddon
1.19              bug in interlaced PNG corruption check (found by ryg)
1.18 (2008-08-02) fix a threading bug (local mutable static)
1.17              support interlaced PNG
1.16              major bugfix - stbi__convert_format converted one too many pixels
1.15              initialize some fields for thread safety
1.14              fix threadsafe conversion bug;
                  header-file-only version (#define STBI_HEADER_FILE_ONLY before including)
1.13              threadsafe
1.12              const qualifiers in the API
1.11              Support installable IDCT, colorspace conversion routines
1.10              Fixes for 64-bit (don't use "unsigned long"); optimized upsampling by Fabian "ryg" Giesen
1.09              Fix format-conversion for PSD code (bad global variables!)
1.08              Thatcher Ulrich's PSD code integrated by Nicolas Schulz
1.07              attempt to fix C++ warning/errors again
1.06              attempt to fix C++ warning/errors again
1.05              fix TGA loading to return correct *comp and use good luminance calc
1.04              default float alpha is 1, not 255; use 'void *' for stbi_image_free
1.03              bugfixes to STBI_NO_STDIO, STBI_NO_HDR
1.02              support for (subset of) HDR files, float interface for preferred access to them
1.01              fix bug: possible bug in handling right-side up bmps... not sure;
                  fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all
1.00              interface to zlib that skips zlib header
0.99              correct handling of alpha in palette
0.98              TGA loader by lonesock; dynamically add loaders (untested)
0.97              jpeg errors on too large a file; also catch another malloc failure
0.96              fix detection of invalid v value - particleman@mollyrocket forum
0.95              during header scan, seek to markers in case of padding
0.94              STBI_NO_STDIO to disable stdio usage; rename all #defines the same
0.93              handle jpegtran output; verbose errors
0.92              read 4,8,16,24,32-bit BMP files of several formats
0.91              output 24-bit Windows 3.0 BMP files
0.90              fix a few more warnings; bump version number to approach 1.0
0.61              bugfixes due to Marc LeBlanc, Christopher Lloyd
0.60              fix compiling as c++
0.59              fix warnings: merge Dave Moore's -Wall fixes
0.58              fix bug: zlib uncompressed mode len/nlen was wrong endian
0.57              fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available
0.56              fix bug: zlib uncompressed mode len vs. nlen
0.55              fix bug: restart_interval not initialized to 0
0.54              allow NULL for 'int *comp'
0.53              fix bug in png 3->4; speedup png decoding
0.52              png handles req_comp=3,4 directly; minor cleanup; jpeg comments
0.51              obey req_comp requests, 1-component jpegs return as 1-component,
                  on 'test' only check type, not whether we support this variant
0.50 (2006-11-19) first released version
  7732. */
  7733. /*
  7734. ------------------------------------------------------------------------------
  7735. This software is available under 2 licenses -- choose whichever you prefer.
  7736. ------------------------------------------------------------------------------
  7737. ALTERNATIVE A - MIT License
  7738. Copyright (c) 2017 Sean Barrett
  7739. Permission is hereby granted, free of charge, to any person obtaining a copy of
  7740. this software and associated documentation files (the "Software"), to deal in
  7741. the Software without restriction, including without limitation the rights to
  7742. use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
  7743. of the Software, and to permit persons to whom the Software is furnished to do
  7744. so, subject to the following conditions:
  7745. The above copyright notice and this permission notice shall be included in all
  7746. copies or substantial portions of the Software.
  7747. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  7748. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  7749. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  7750. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  7751. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  7752. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  7753. SOFTWARE.
  7754. ------------------------------------------------------------------------------
  7755. ALTERNATIVE B - Public Domain (www.unlicense.org)
  7756. This is free and unencumbered software released into the public domain.
  7757. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
  7758. software, either in source code form or as a compiled binary, for any purpose,
  7759. commercial or non-commercial, and by any means.
  7760. In jurisdictions that recognize copyright laws, the author or authors of this
  7761. software dedicate any and all copyright interest in the software to the public
  7762. domain. We make this dedication for the benefit of the public at large and to
  7763. the detriment of our heirs and successors. We intend this dedication to be an
  7764. overt act of relinquishment in perpetuity of all present and future rights to
  7765. this software under copyright law.
  7766. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  7767. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  7768. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  7769. AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  7770. ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  7771. WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  7772. ------------------------------------------------------------------------------
  7773. */